Welcome to my page
This is the learning diary for the course ‘Introduction to Open Data Science 2020’. I would like to further develop my data science skills in R and get familiar with Github.
My GitHub repository is https://github.com/Imangholiloo/IODS-project
# Let's get the date and time
date()
## [1] "Wed Nov 18 19:28:54 2020"
Alternatively, use print(Sys.time()).
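If a formatted timestamp is preferred, one option (a small sketch, not from the course material):
format(Sys.time(), "%Y-%m-%d %H:%M:%S")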
# Let's say hello to you via R!
for (i in "Hello anyone"){
print (i)}
## [1] "Hello anyone"
# load package
library(rmarkdown)
Mohammad Imangholiloo
20201105
Read the data into R
lrn14 <- read.table("http://www.helsinki.fi/~kvehkala/JYTmooc/JYTOPKYS3-data.txt", sep="\t", header=TRUE)
Alternatively, read it from your local drive:
lrn14 <- read.table("C:/Users/Mohammad/Documents/IODS-project/data/JYTOPKYS3-data.txt", sep="\t", header=TRUE)
Or read it from your clipboard using:
lrn14 <- readClipboard()
The metadata (description of the data) is here (in Finnish): https://www.mv.helsinki.fi/home/kvehkala/JYTmooc/JYTOPKYS2-meta.txt
FYI: to keep character columns as characters (rather than factors) in R, add stringsAsFactors=FALSE to the read call.
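For example, the same read call with that argument added (a sketch; with R >= 4.0 this is already the default):
lrn14 <- read.table("http://www.helsinki.fi/~kvehkala/JYTmooc/JYTOPKYS3-data.txt", sep="\t", header=TRUE, stringsAsFactors=FALSE)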
# check dimensions of the data
dim(lrn14)
## [1] 183 60
# check structure of the data
str(lrn14)
## 'data.frame': 183 obs. of 60 variables:
## $ Aa : int 3 2 4 4 3 4 4 3 2 3 ...
## $ Ab : int 1 2 1 2 2 2 1 1 1 2 ...
## $ Ac : int 2 2 1 3 2 1 2 2 2 1 ...
## $ Ad : int 1 2 1 2 1 1 2 1 1 1 ...
## $ Ae : int 1 1 1 1 2 1 1 1 1 1 ...
## $ Af : int 1 1 1 1 1 1 1 1 1 2 ...
## $ ST01 : int 4 4 3 3 4 4 5 4 4 4 ...
## $ SU02 : int 2 2 1 3 2 3 2 2 1 2 ...
## $ D03 : int 4 4 4 4 5 5 4 4 5 4 ...
## $ ST04 : int 4 4 4 4 3 4 2 5 5 4 ...
## $ SU05 : int 2 4 2 3 4 3 2 4 2 4 ...
## $ D06 : int 4 2 3 4 4 5 3 3 4 4 ...
## $ D07 : int 4 3 4 4 4 5 4 4 5 4 ...
## $ SU08 : int 3 4 1 2 3 4 4 2 4 2 ...
## $ ST09 : int 3 4 3 3 4 4 2 4 4 4 ...
## $ SU10 : int 2 1 1 1 2 1 1 2 1 2 ...
## $ D11 : int 3 4 4 3 4 5 5 3 4 4 ...
## $ ST12 : int 3 1 4 3 2 3 2 4 4 4 ...
## $ SU13 : int 3 3 2 2 3 1 1 2 1 2 ...
## $ D14 : int 4 2 4 4 4 5 5 4 4 4 ...
## $ D15 : int 3 3 2 3 3 4 2 2 3 4 ...
## $ SU16 : int 2 4 3 2 3 2 3 3 4 4 ...
## $ ST17 : int 3 4 3 3 4 3 4 3 4 4 ...
## $ SU18 : int 2 2 1 1 1 2 1 2 1 2 ...
## $ D19 : int 4 3 4 3 4 4 4 4 5 4 ...
## $ ST20 : int 2 1 3 3 3 3 1 4 4 2 ...
## $ SU21 : int 3 2 2 3 2 4 1 3 2 4 ...
## $ D22 : int 3 2 4 3 3 5 4 2 4 4 ...
## $ D23 : int 2 3 3 3 3 4 3 2 4 4 ...
## $ SU24 : int 2 4 3 2 4 2 2 4 2 4 ...
## $ ST25 : int 4 2 4 3 4 4 1 4 4 4 ...
## $ SU26 : int 4 4 4 2 3 2 1 4 4 4 ...
## $ D27 : int 4 2 3 3 3 5 4 4 5 4 ...
## $ ST28 : int 4 2 5 3 5 4 1 4 5 2 ...
## $ SU29 : int 3 3 2 3 3 2 1 2 1 2 ...
## $ D30 : int 4 3 4 4 3 5 4 3 4 4 ...
## $ D31 : int 4 4 3 4 4 5 4 4 5 4 ...
## $ SU32 : int 3 5 5 3 4 3 4 4 3 4 ...
## $ Ca : int 2 4 3 3 2 3 4 2 3 2 ...
## $ Cb : int 4 4 5 4 4 5 5 4 5 4 ...
## $ Cc : int 3 4 4 4 4 4 4 4 4 4 ...
## $ Cd : int 4 5 4 4 3 4 4 5 5 5 ...
## $ Ce : int 3 5 3 3 3 3 4 3 3 4 ...
## $ Cf : int 2 3 4 4 3 4 5 3 3 4 ...
## $ Cg : int 3 2 4 4 4 5 5 3 5 4 ...
## $ Ch : int 4 4 2 3 4 4 3 3 5 4 ...
## $ Da : int 3 4 1 2 3 3 2 2 4 1 ...
## $ Db : int 4 3 4 4 4 5 4 4 2 4 ...
## $ Dc : int 4 3 4 5 4 4 4 4 4 4 ...
## $ Dd : int 5 4 1 2 4 4 5 3 5 2 ...
## $ De : int 4 3 4 5 4 4 5 4 4 2 ...
## $ Df : int 2 2 1 1 2 3 1 1 4 1 ...
## $ Dg : int 4 3 3 5 5 4 4 4 5 1 ...
## $ Dh : int 3 3 1 4 5 3 4 1 4 1 ...
## $ Di : int 4 2 1 2 3 3 2 1 4 1 ...
## $ Dj : int 4 4 5 5 3 5 4 5 2 4 ...
## $ Age : int 53 55 49 53 49 38 50 37 37 42 ...
## $ Attitude: int 37 31 25 35 37 38 35 29 38 21 ...
## $ Points : int 25 12 24 10 22 21 21 31 24 26 ...
## $ gender : chr "F" "M" "F" "M" ...
As shown, the data have 183 observations and 60 variables; the data structure is also printed (all variables are integer, except gender, which is character).
#3— selecting columns
Names of the groups of columns (as vectors) related to questions under the umbrella of ‘deep’, ‘surface’ and ‘strategic’ learning:
#install.packages('dplyr')
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
deep_questions <- c("D03", "D11", "D19", "D27", "D07", "D14", "D22", "D30", "D06", "D15", "D23", "D31")
surface_questions <- c("SU02","SU10","SU18","SU26", "SU05","SU13","SU21","SU29","SU08","SU16","SU24","SU32")
strategic_questions <- c("ST01","ST09","ST17","ST25","ST04","ST12","ST20","ST28")
# select the columns related to deep learning and create column 'deep' by averaging
deep_columns <- select(lrn14, one_of(deep_questions))
lrn14$deep <- rowMeans(deep_columns)
# select the columns related to surface_questions and create column 'surface' by averaging
surface_columns <- select(lrn14, one_of(surface_questions))
lrn14$surface <- rowMeans(surface_columns)
# select the columns related to strategic_questions and create column 'strategic' by averaging
strategic_columns <- select(lrn14, one_of(strategic_questions))
lrn14$strategic <- rowMeans(strategic_columns)
# keep only the needed columns, including the ones just created
keep_columns <- c("gender","Age","Attitude", "deep", "strategic", "surface", "Points")
needed_columns_2014 <- select(lrn14, one_of(keep_columns))
# check structure of the new df
str(needed_columns_2014)
## 'data.frame': 183 obs. of 7 variables:
## $ gender : chr "F" "M" "F" "M" ...
## $ Age : int 53 55 49 53 49 38 50 37 37 42 ...
## $ Attitude : int 37 31 25 35 37 38 35 29 38 21 ...
## $ deep : num 3.58 2.92 3.5 3.5 3.67 ...
## $ strategic: num 3.38 2.75 3.62 3.12 3.62 ...
## $ surface : num 2.58 3.17 2.25 2.25 2.83 ...
## $ Points : int 25 12 24 10 22 21 21 31 24 26 ...
# Modify column names: make them all lowercase (not capitalized)
colnames(needed_columns_2014)[2] <- "age"
colnames(needed_columns_2014)[3] <- "attitude"
colnames(needed_columns_2014)[7] <- "points"
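Alternatively, all column names could be lowercased in one go; in this data frame it is equivalent, since the remaining names are already lowercase:
colnames(needed_columns_2014) <- tolower(colnames(needed_columns_2014))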
#check that it worked
str(needed_columns_2014)
## 'data.frame': 183 obs. of 7 variables:
## $ gender : chr "F" "M" "F" "M" ...
## $ age : int 53 55 49 53 49 38 50 37 37 42 ...
## $ attitude : int 37 31 25 35 37 38 35 29 38 21 ...
## $ deep : num 3.58 2.92 3.5 3.5 3.67 ...
## $ strategic: num 3.38 2.75 3.62 3.12 3.62 ...
## $ surface : num 2.58 3.17 2.25 2.25 2.83 ...
## $ points : int 25 12 24 10 22 21 21 31 24 26 ...
# keep only rows (observations) with exam points > 0, i.e. exclude the zeros
needed_columns_2014 <- filter(needed_columns_2014, points > 0) # alternatively: points != 0
#let's check a descriptive statistical summary of the columns, FYI
summary(needed_columns_2014)
## gender age attitude deep
## Length:166 Min. :17.00 Min. :14.00 Min. :1.583
## Class :character 1st Qu.:21.00 1st Qu.:26.00 1st Qu.:3.333
## Mode :character Median :22.00 Median :32.00 Median :3.667
## Mean :25.51 Mean :31.43 Mean :3.680
## 3rd Qu.:27.00 3rd Qu.:37.00 3rd Qu.:4.083
## Max. :55.00 Max. :50.00 Max. :4.917
## strategic surface points
## Min. :1.250 Min. :1.583 Min. : 7.00
## 1st Qu.:2.625 1st Qu.:2.417 1st Qu.:19.00
## Median :3.188 Median :2.833 Median :23.00
## Mean :3.121 Mean :2.787 Mean :22.72
## 3rd Qu.:3.625 3rd Qu.:3.167 3rd Qu.:27.75
## Max. :5.000 Max. :4.333 Max. :33.00
View(needed_columns_2014) #to view the table in R
#4—
#set working dir
setwd("C:/Users/Mohammad/Documents/IODS-project/data/")
#Save the analysis dataset to the ‘data’ folder
write.csv(needed_columns_2014, "learning2014.csv")
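Note: write.csv() writes row names by default, which is why an extra X column appears when the file is read back below; a variation on the call above avoids it:
write.csv(needed_columns_2014, "learning2014.csv", row.names = FALSE)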
#for the sake of being sure, read the data again!
learning2014_read_again <- read.csv("C:/Users/Mohammad/Documents/IODS-project/data/learning2014.csv")
head(learning2014_read_again)
## X gender age attitude deep strategic surface points
## 1 1 F 53 37 3.583333 3.375 2.583333 25
## 2 2 M 55 31 2.916667 2.750 3.166667 12
## 3 3 F 49 25 3.500000 3.625 2.250000 24
## 4 4 M 53 35 3.500000 3.125 2.250000 10
## 5 5 M 49 37 3.666667 3.625 2.833333 22
## 6 6 F 38 38 4.750000 3.625 2.416667 21
tail(learning2014_read_again)
## X gender age attitude deep strategic surface points
## 161 161 F 19 20 4.083333 3.375 2.833333 20
## 162 162 F 22 42 2.916667 1.750 3.166667 28
## 163 163 M 35 41 3.833333 3.000 2.750000 31
## 164 164 F 18 37 3.166667 2.625 3.416667 18
## 165 165 F 19 36 3.416667 2.625 3.000000 30
## 166 166 M 21 18 4.083333 3.375 2.666667 19
str(learning2014_read_again)
## 'data.frame': 166 obs. of 8 variables:
## $ X : int 1 2 3 4 5 6 7 8 9 10 ...
## $ gender : chr "F" "M" "F" "M" ...
## $ age : int 53 55 49 53 49 38 50 37 37 42 ...
## $ attitude : int 37 31 25 35 37 38 35 29 38 21 ...
## $ deep : num 3.58 2.92 3.5 3.5 3.67 ...
## $ strategic: num 3.38 2.75 3.62 3.12 3.62 ...
## $ surface : num 2.58 3.17 2.25 2.25 2.83 ...
## $ points : int 25 12 24 10 22 21 21 31 24 26 ...
summary(learning2014_read_again)
## X gender age attitude
## Min. : 1.00 Length:166 Min. :17.00 Min. :14.00
## 1st Qu.: 42.25 Class :character 1st Qu.:21.00 1st Qu.:26.00
## Median : 83.50 Mode :character Median :22.00 Median :32.00
## Mean : 83.50 Mean :25.51 Mean :31.43
## 3rd Qu.:124.75 3rd Qu.:27.00 3rd Qu.:37.00
## Max. :166.00 Max. :55.00 Max. :50.00
## deep strategic surface points
## Min. :1.583 Min. :1.250 Min. :1.583 Min. : 7.00
## 1st Qu.:3.333 1st Qu.:2.625 1st Qu.:2.417 1st Qu.:19.00
## Median :3.667 Median :3.188 Median :2.833 Median :23.00
## Mean :3.680 Mean :3.121 Mean :2.787 Mean :22.72
## 3rd Qu.:4.083 3rd Qu.:3.625 3rd Qu.:3.167 3rd Qu.:27.75
## Max. :4.917 Max. :5.000 Max. :4.333 Max. :33.00
Read the data
learning2014 <- read.csv("C:/Users/Mohammad/Documents/IODS-project/data/learning2014_MI.csv")
#Metadata can be found at https://www.mv.helsinki.fi/home/kvehkala/JYTmooc/JYTOPKYS3-meta.txt
#alternatively by:
#learning2014 <- read.table("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/learning2014.txt", header=TRUE, sep = ",")
**Description of the data:**
This dataset was gathered by Kimmo Vehkalahti during the course Introduction to Social Statistics, fall 2014; the work was supported by his Teachers' Academy funding (2013-2015).
str(learning2014)
## 'data.frame': 166 obs. of 8 variables:
## $ X : int 1 2 3 4 5 6 7 8 9 10 ...
## $ gender : chr "F" "M" "F" "M" ...
## $ age : int 53 55 49 53 49 38 50 37 37 42 ...
## $ attitude: num 3.7 3.1 2.5 3.5 3.7 3.8 3.5 2.9 3.8 2.1 ...
## $ deep : num 3.58 2.92 3.5 3.5 3.67 ...
## $ stra : num 3.38 2.75 3.62 3.12 3.62 ...
## $ surf : num 2.58 3.17 2.25 2.25 2.83 ...
## $ points : int 25 12 24 10 22 21 21 31 24 26 ...
View(learning2014)
dim(learning2014) #this dataset contains 166 row and 8 columns
## [1] 166 8
colnames(learning2014) #column names are printed in the console
## [1] "X" "gender" "age" "attitude" "deep" "stra" "surf"
## [8] "points"
summary(learning2014) #a short view of descriptive statistical features of the columns, e.g. mean, min, max, etc.
## X gender age attitude
## Min. : 1.00 Length:166 Min. :17.00 Min. :1.400
## 1st Qu.: 42.25 Class :character 1st Qu.:21.00 1st Qu.:2.600
## Median : 83.50 Mode :character Median :22.00 Median :3.200
## Mean : 83.50 Mean :25.51 Mean :3.143
## 3rd Qu.:124.75 3rd Qu.:27.00 3rd Qu.:3.700
## Max. :166.00 Max. :55.00 Max. :5.000
## deep stra surf points
## Min. :1.583 Min. :1.250 Min. :1.583 Min. : 7.00
## 1st Qu.:3.333 1st Qu.:2.625 1st Qu.:2.417 1st Qu.:19.00
## Median :3.667 Median :3.188 Median :2.833 Median :23.00
## Mean :3.680 Mean :3.121 Mean :2.787 Mean :22.72
## 3rd Qu.:4.083 3rd Qu.:3.625 3rd Qu.:3.167 3rd Qu.:27.75
## Max. :4.917 Max. :5.000 Max. :4.333 Max. :33.00
table(learning2014$gender) # to show the number of participants by gender
##
## F M
## 110 56
table(learning2014$age) # to show the number of participants by age
##
## 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 37 38 39 40 42 44 45
## 1 3 7 25 31 20 12 7 10 6 3 2 4 5 3 1 3 3 2 4 2 1 1 1 1 1
## 48 49 50 53 55
## 1 2 1 2 1
#2—
#graphically show interactions (i.e. correlations and distributions) of the columns (variables) of this dataset
plot(learning2014[c(-1,-2)]) #[c(-1,-2)] drops the index and gender (character) columns of the df, source: https://statisticsglobe.com/remove-element-from-list-in-r
hist(learning2014$age, col='grey') # to check histogram of a column (e.g. age)
A loop to automatically plot a histogram of each variable. NB: check that all variables are numeric; otherwise the loop will stop at gender (male/female).
colnames(learning2014[-2]) #[-2] removes the second (gender) column, source: https://statisticsglobe.com/remove-element-from-list-in-r
## [1] "X" "age" "attitude" "deep" "stra" "surf" "points"
for (i in colnames(learning2014[-2])){
print(i)
hist(learning2014[[i]], col='grey')
}
## [1] "X"
## [1] "age"
## [1] "attitude"
## [1] "deep"
## [1] "stra"
## [1] "surf"
## [1] "points"
**Visualize with ggplot**
#install and load it
#install.packages("ggplot2")
library(ggplot2)
# setup plot with data and aesthetic mapping
p1 <- ggplot(learning2014, aes(x = attitude, y = points, col = gender))
# select visualization type (points)
p2 <- p1 + geom_point()
# draw the plot
p2
# add a regression line
p3 <- p2 + geom_smooth(method = "lm")
# add a main title and draw the plot
p4 <- p3 + ggtitle("Student's attitude versus exam points")
p4
## `geom_smooth()` using formula 'y ~ x'
Interpretation:
As the graph shows, there is a strong positive correlation between students' attitude and their exam points. The correlation is stronger for male students, as the slope of their regression line is steeper than the females'. So, male students' attitude influences their exam points more than female students' attitude does.
If so, read this section :)
Draw a scatter plot matrix
#pairs(learning2014[c(-1:-2)]) # c(-1:-2) removes those columns
pairs(learning2014[c(-1:-2)], col = c("black","red")[factor(learning2014$gender)]) # colour the points by gender
F: black, M: red
If yes, use the GGally and ggplot2 libraries to create an even more advanced plot matrix with ggpairs()
library(GGally)
## Registered S3 method overwritten by 'GGally':
## method from
## +.gg ggplot2
library(ggplot2)
p <- ggpairs(learning2014[c(-1:-2)], mapping = aes(), lower = list(combo = wrap("facethist", bins = 20)))
p
I hope you got fascinated and liked it like me :)
The graph shows a negative correlation (r = -0.324) between surface and deep learning.
#3—
Now that we are familiar with the variables and their distributions and correlations, it's time to fit a regression model.
Let's create a scatter plot of points versus attitude:
qplot(attitude, points, data = learning2014) + geom_smooth(method = "lm")
## `geom_smooth()` using formula 'y ~ x'
# let's make a simple linear regression model between exam points and attitude variables
my_model <- lm(points ~ attitude, data = learning2014)
# print out a summary of the model
summary(my_model)
##
## Call:
## lm(formula = points ~ attitude, data = learning2014)
##
## Residuals:
## Min 1Q Median 3Q Max
## -16.9763 -3.2119 0.4339 4.1534 10.6645
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 11.6372 1.8303 6.358 1.95e-09 ***
## attitude 3.5255 0.5674 6.214 4.12e-09 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 5.32 on 164 degrees of freedom
## Multiple R-squared: 0.1906, Adjusted R-squared: 0.1856
## F-statistic: 38.61 on 1 and 164 DF, p-value: 4.119e-09
Interpretation: This model shows that attitude has a statistically significant relationship with the target variable and can be used to predict exam points, because the Pr(>|t|) values are well below 0.05. Our model is thus statistically significant at the 95% level, and in fact even at the >99% level, since the values are < 0.01. The fitted model is: points = 3.5255*attitude + 11.6372. A linear model has the form y = ax + b, where a is the coefficient of the variable x and b is the intercept; both can be derived from the model summary.
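As a quick sanity check of that equation, we can let R predict the points for an illustrative attitude value (the value 4 here is made up for the example):
predict(my_model, newdata = data.frame(attitude = 4))
# 3.5255 * 4 + 11.6372 = 25.74 points, matching the equation above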
Let's select variables of interest and plot them:
ggpairs(learning2014, lower = list(combo = wrap("facethist", bins = 20)))
# create a regression model with multiple explanatory variables
my_model2 <- lm(points ~ attitude + stra + surf, data = learning2014)
# print out a summary of the model
summary(my_model2)
##
## Call:
## lm(formula = points ~ attitude + stra + surf, data = learning2014)
##
## Residuals:
## Min 1Q Median 3Q Max
## -17.1550 -3.4346 0.5156 3.6401 10.8952
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 11.0171 3.6837 2.991 0.00322 **
## attitude 3.3952 0.5741 5.913 1.93e-08 ***
## stra 0.8531 0.5416 1.575 0.11716
## surf -0.5861 0.8014 -0.731 0.46563
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 5.296 on 162 degrees of freedom
## Multiple R-squared: 0.2074, Adjusted R-squared: 0.1927
## F-statistic: 14.13 on 3 and 162 DF, p-value: 3.156e-08
In this model we tried to predict exam points using the attitude, stra and surf variables, but as the results show, only attitude has a statistically significant relationship with exam points; thus, we shall change the other variables.
#4—
It seems that there is no significant relationship between exam points and strategic or surface learning, so we shall remove them.
my_model3 <- lm(points ~ attitude + stra+ deep +age, data = learning2014)
summary(my_model3)
##
## Call:
## lm(formula = points ~ attitude + stra + deep + age, data = learning2014)
##
## Residuals:
## Min 1Q Median 3Q Max
## -17.9941 -3.0839 0.5037 3.5519 11.3298
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 13.24374 3.57079 3.709 0.000286 ***
## attitude 3.53884 0.56538 6.259 3.37e-09 ***
## stra 1.05032 0.53652 1.958 0.051998 .
## deep -0.73226 0.74677 -0.981 0.328273
## age -0.08750 0.05303 -1.650 0.100866
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 5.261 on 161 degrees of freedom
## Multiple R-squared: 0.2228, Adjusted R-squared: 0.2035
## F-statistic: 11.54 on 4 and 161 DF, p-value: 2.915e-08
As the model summary shows, the other variables (age, deep, stra) are still not significant, but stra is very close to the 95% significance level, as its Pr(>|t|) value is only slightly above 0.05.
#5—
my_model4 <- lm(points ~ attitude + stra, data = learning2014)
summary(my_model4)
##
## Call:
## lm(formula = points ~ attitude + stra, data = learning2014)
##
## Residuals:
## Min 1Q Median 3Q Max
## -17.6436 -3.3113 0.5575 3.7928 10.9295
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 8.9729 2.3959 3.745 0.00025 ***
## attitude 3.4658 0.5652 6.132 6.31e-09 ***
## stra 0.9137 0.5345 1.709 0.08927 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 5.289 on 163 degrees of freedom
## Multiple R-squared: 0.2048, Adjusted R-squared: 0.1951
## F-statistic: 20.99 on 2 and 163 DF, p-value: 7.734e-09
Interpretation
Same conclusion as above: stra is not statistically significant (at the 95% significance level), so we could either remove it from the equation (preferred) or keep it for the purpose of this practical, to have a multi-variable regression.
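To make that remove-or-keep decision more formal, the two nested models could be compared with an F-test (a sketch, assuming my_model and my_model4 are still in memory):
anova(my_model, my_model4)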
# draw diagnostic plots using the plot() function. Choose the plots 1, 2 and 5
par(mfrow = c(2,2)) #defines that plots go to 2*2 frame (joining plots together)
plot(my_model4, which = c(1,2,5))
Interpretation
In linear regression, we assume:
1. a linear relationship between the explanatory variables and the target variable
2. the errors are normally distributed
3. the errors are not correlated
4. the errors have constant variance
5. the size of a given error does not depend on the explanatory variables.
So, we can still check them as following:
To explore the normality assumption, the Q-Q plot can help us. As it shows, there is a reasonable fit between the dots and the line; thus the errors are approximately normally distributed.
To assess the assumption of constant error variance (#4 above), we can check the Residuals vs Fitted plot. It shows a reasonably random spread of the dots around the line, so this assumption also holds.
In other words, the Residuals vs Fitted plot confirms that the fitted model is appropriate, because the variability of the residuals does not increase with the fitted values.
The Residuals vs Leverage plot helps identify observations with an unusually high impact, i.e. outliers. It seems there is no such outlier in this data.
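A complementary numeric check for influential observations (a common rule of thumb, not part of the course material): flag points whose Cook's distance exceeds 4/n.
cooksd <- cooks.distance(my_model4)
which(cooksd > 4 / nrow(learning2014))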
more info about interpretations: https://vimeo.com/199820384
Let's also check the residuals of the model using a boxplot. The closer to 0, the better the model; in other words, it shows the symmetry and specifically the normality of the error terms in the regression model.
boxplot(my_model4$residuals,
main = "Residuals of the model",
xlab = "Residual values",
ylab = "",
col = "orange",
border = "brown",
horizontal = F,
notch = F)
Thank you and all the best!
Describe the work you have done this week and summarize your learning.
date()
## [1] "Wed Nov 18 19:29:13 2020"
Here we go again…
Mohammad Imangholiloo
20201112
In this script we assess the Student Performance Data Set from ___URL: https://archive.ics.uci.edu/ml/datasets/Student+Performance___
Data description:
These data describe student achievement in secondary education at two Portuguese schools. The data attributes include student grades and demographic, social and school-related features, and were collected using school reports and questionnaires. Two datasets are provided regarding performance in two distinct subjects: Mathematics (mat) and Portuguese language (por). ___more info: https://archive.ics.uci.edu/ml/datasets/Student+Performance___
# Read the data to R
# Set working directory
setwd("C:/Users/Mohammad/Documents/IODS-project/data")
# read math class questionnaire data into memory
student_mat <- read.csv("student-mat.csv", sep = ";", header = TRUE)
# read Portuguese class questionnaire data into memory
student_por <- read.csv("student-por.csv", sep = ";", header = TRUE)
# check dimensions of both data
dim(student_mat)
## [1] 395 33
dim(student_por)
## [1] 649 33
# check structure of both data
str(student_mat)
## 'data.frame': 395 obs. of 33 variables:
## $ school : chr "GP" "GP" "GP" "GP" ...
## $ sex : chr "F" "F" "F" "F" ...
## $ age : int 18 17 15 15 16 16 16 17 15 15 ...
## $ address : chr "U" "U" "U" "U" ...
## $ famsize : chr "GT3" "GT3" "LE3" "GT3" ...
## $ Pstatus : chr "A" "T" "T" "T" ...
## $ Medu : int 4 1 1 4 3 4 2 4 3 3 ...
## $ Fedu : int 4 1 1 2 3 3 2 4 2 4 ...
## $ Mjob : chr "at_home" "at_home" "at_home" "health" ...
## $ Fjob : chr "teacher" "other" "other" "services" ...
## $ reason : chr "course" "course" "other" "home" ...
## $ guardian : chr "mother" "father" "mother" "mother" ...
## $ traveltime: int 2 1 1 1 1 1 1 2 1 1 ...
## $ studytime : int 2 2 2 3 2 2 2 2 2 2 ...
## $ failures : int 0 0 3 0 0 0 0 0 0 0 ...
## $ schoolsup : chr "yes" "no" "yes" "no" ...
## $ famsup : chr "no" "yes" "no" "yes" ...
## $ paid : chr "no" "no" "yes" "yes" ...
## $ activities: chr "no" "no" "no" "yes" ...
## $ nursery : chr "yes" "no" "yes" "yes" ...
## $ higher : chr "yes" "yes" "yes" "yes" ...
## $ internet : chr "no" "yes" "yes" "yes" ...
## $ romantic : chr "no" "no" "no" "yes" ...
## $ famrel : int 4 5 4 3 4 5 4 4 4 5 ...
## $ freetime : int 3 3 3 2 3 4 4 1 2 5 ...
## $ goout : int 4 3 2 2 2 2 4 4 2 1 ...
## $ Dalc : int 1 1 2 1 1 1 1 1 1 1 ...
## $ Walc : int 1 1 3 1 2 2 1 1 1 1 ...
## $ health : int 3 3 3 5 5 5 3 1 1 5 ...
## $ absences : int 6 4 10 2 4 10 0 6 0 0 ...
## $ G1 : int 5 5 7 15 6 15 12 6 16 14 ...
## $ G2 : int 6 5 8 14 10 15 12 5 18 15 ...
## $ G3 : int 6 6 10 15 10 15 11 6 19 15 ...
str(student_por)
## 'data.frame': 649 obs. of 33 variables:
## $ school : chr "GP" "GP" "GP" "GP" ...
## $ sex : chr "F" "F" "F" "F" ...
## $ age : int 18 17 15 15 16 16 16 17 15 15 ...
## $ address : chr "U" "U" "U" "U" ...
## $ famsize : chr "GT3" "GT3" "LE3" "GT3" ...
## $ Pstatus : chr "A" "T" "T" "T" ...
## $ Medu : int 4 1 1 4 3 4 2 4 3 3 ...
## $ Fedu : int 4 1 1 2 3 3 2 4 2 4 ...
## $ Mjob : chr "at_home" "at_home" "at_home" "health" ...
## $ Fjob : chr "teacher" "other" "other" "services" ...
## $ reason : chr "course" "course" "other" "home" ...
## $ guardian : chr "mother" "father" "mother" "mother" ...
## $ traveltime: int 2 1 1 1 1 1 1 2 1 1 ...
## $ studytime : int 2 2 2 3 2 2 2 2 2 2 ...
## $ failures : int 0 0 0 0 0 0 0 0 0 0 ...
## $ schoolsup : chr "yes" "no" "yes" "no" ...
## $ famsup : chr "no" "yes" "no" "yes" ...
## $ paid : chr "no" "no" "no" "no" ...
## $ activities: chr "no" "no" "no" "yes" ...
## $ nursery : chr "yes" "no" "yes" "yes" ...
## $ higher : chr "yes" "yes" "yes" "yes" ...
## $ internet : chr "no" "yes" "yes" "yes" ...
## $ romantic : chr "no" "no" "no" "yes" ...
## $ famrel : int 4 5 4 3 4 5 4 4 4 5 ...
## $ freetime : int 3 3 3 2 3 4 4 1 2 5 ...
## $ goout : int 4 3 2 2 2 2 4 4 2 1 ...
## $ Dalc : int 1 1 2 1 1 1 1 1 1 1 ...
## $ Walc : int 1 1 3 1 2 2 1 1 1 1 ...
## $ health : int 3 3 3 5 5 5 3 1 1 5 ...
## $ absences : int 4 2 6 0 0 6 0 2 0 0 ...
## $ G1 : int 0 9 12 14 11 12 13 10 15 12 ...
## $ G2 : int 11 11 13 14 13 12 12 13 16 12 ...
## $ G3 : int 11 11 12 14 13 13 13 13 17 13 ...
# check the first 6 rows of both data
head(student_mat)
## school sex age address famsize Pstatus Medu Fedu Mjob Fjob reason
## 1 GP F 18 U GT3 A 4 4 at_home teacher course
## 2 GP F 17 U GT3 T 1 1 at_home other course
## 3 GP F 15 U LE3 T 1 1 at_home other other
## 4 GP F 15 U GT3 T 4 2 health services home
## 5 GP F 16 U GT3 T 3 3 other other home
## 6 GP M 16 U LE3 T 4 3 services other reputation
## guardian traveltime studytime failures schoolsup famsup paid activities
## 1 mother 2 2 0 yes no no no
## 2 father 1 2 0 no yes no no
## 3 mother 1 2 3 yes no yes no
## 4 mother 1 3 0 no yes yes yes
## 5 father 1 2 0 no yes yes no
## 6 mother 1 2 0 no yes yes yes
## nursery higher internet romantic famrel freetime goout Dalc Walc health
## 1 yes yes no no 4 3 4 1 1 3
## 2 no yes yes no 5 3 3 1 1 3
## 3 yes yes yes no 4 3 2 2 3 3
## 4 yes yes yes yes 3 2 2 1 1 5
## 5 yes yes no no 4 3 2 1 2 5
## 6 yes yes yes no 5 4 2 1 2 5
## absences G1 G2 G3
## 1 6 5 6 6
## 2 4 5 5 6
## 3 10 7 8 10
## 4 2 15 14 15
## 5 4 6 10 10
## 6 10 15 15 15
head(student_por)
## school sex age address famsize Pstatus Medu Fedu Mjob Fjob reason
## 1 GP F 18 U GT3 A 4 4 at_home teacher course
## 2 GP F 17 U GT3 T 1 1 at_home other course
## 3 GP F 15 U LE3 T 1 1 at_home other other
## 4 GP F 15 U GT3 T 4 2 health services home
## 5 GP F 16 U GT3 T 3 3 other other home
## 6 GP M 16 U LE3 T 4 3 services other reputation
## guardian traveltime studytime failures schoolsup famsup paid activities
## 1 mother 2 2 0 yes no no no
## 2 father 1 2 0 no yes no no
## 3 mother 1 2 0 yes no no no
## 4 mother 1 3 0 no yes no yes
## 5 father 1 2 0 no yes no no
## 6 mother 1 2 0 no yes no yes
## nursery higher internet romantic famrel freetime goout Dalc Walc health
## 1 yes yes no no 4 3 4 1 1 3
## 2 no yes yes no 5 3 3 1 1 3
## 3 yes yes yes no 4 3 2 2 3 3
## 4 yes yes yes yes 3 2 2 1 1 5
## 5 yes yes no no 4 3 2 1 2 5
## 6 yes yes yes no 5 4 2 1 2 5
## absences G1 G2 G3
## 1 4 0 11 11
## 2 2 9 11 11
## 3 6 12 13 12
## 4 0 14 14 14
## 5 0 11 13 13
## 6 6 12 12 13
# check the last 6 rows of both data
tail(student_mat)
## school sex age address famsize Pstatus Medu Fedu Mjob Fjob reason
## 390 MS F 18 U GT3 T 1 1 other other course
## 391 MS M 20 U LE3 A 2 2 services services course
## 392 MS M 17 U LE3 T 3 1 services services course
## 393 MS M 21 R GT3 T 1 1 other other course
## 394 MS M 18 R LE3 T 3 2 services other course
## 395 MS M 19 U LE3 T 1 1 other at_home course
## guardian traveltime studytime failures schoolsup famsup paid activities
## 390 mother 2 2 1 no no no yes
## 391 other 1 2 2 no yes yes no
## 392 mother 2 1 0 no no no no
## 393 other 1 1 3 no no no no
## 394 mother 3 1 0 no no no no
## 395 father 1 1 0 no no no no
## nursery higher internet romantic famrel freetime goout Dalc Walc health
## 390 yes yes no no 1 1 1 1 1 5
## 391 yes yes no no 5 5 4 4 5 4
## 392 no yes yes no 2 4 5 3 4 2
## 393 no yes no no 5 5 3 3 3 3
## 394 no yes yes no 4 4 1 3 4 5
## 395 yes yes yes no 3 2 3 3 3 5
## absences G1 G2 G3
## 390 0 6 5 0
## 391 11 9 9 9
## 392 3 14 16 16
## 393 3 10 8 7
## 394 0 11 12 10
## 395 5 8 9 9
tail(student_por)
## school sex age address famsize Pstatus Medu Fedu Mjob Fjob
## 644 MS F 18 R GT3 T 4 4 teacher at_home
## 645 MS F 19 R GT3 T 2 3 services other
## 646 MS F 18 U LE3 T 3 1 teacher services
## 647 MS F 18 U GT3 T 1 1 other other
## 648 MS M 17 U LE3 T 3 1 services services
## 649 MS M 18 R LE3 T 3 2 services other
## reason guardian traveltime studytime failures schoolsup famsup paid
## 644 reputation mother 3 1 0 no yes no
## 645 course mother 1 3 1 no no no
## 646 course mother 1 2 0 no yes no
## 647 course mother 2 2 0 no no no
## 648 course mother 2 1 0 no no no
## 649 course mother 3 1 0 no no no
## activities nursery higher internet romantic famrel freetime goout Dalc Walc
## 644 yes yes yes yes yes 4 4 3 2 2
## 645 yes no yes yes no 5 4 2 1 2
## 646 no yes yes yes no 4 3 4 1 1
## 647 yes yes yes no no 1 1 1 1 1
## 648 no no yes yes no 2 4 5 3 4
## 649 no no yes yes no 4 4 1 3 4
## health absences G1 G2 G3
## 644 5 4 7 9 10
## 645 5 4 10 11 10
## 646 1 4 15 15 16
## 647 5 6 11 12 9
## 648 2 6 10 10 10
## 649 5 4 10 11 11
#a short view about descriptive statistical features of the columns, e.g. mean, min, max, etc.
summary(student_mat)
## school sex age address
## Length:395 Length:395 Min. :15.0 Length:395
## Class :character Class :character 1st Qu.:16.0 Class :character
## Mode :character Mode :character Median :17.0 Mode :character
## Mean :16.7
## 3rd Qu.:18.0
## Max. :22.0
## famsize Pstatus Medu Fedu
## Length:395 Length:395 Min. :0.000 Min. :0.000
## Class :character Class :character 1st Qu.:2.000 1st Qu.:2.000
## Mode :character Mode :character Median :3.000 Median :2.000
## Mean :2.749 Mean :2.522
## 3rd Qu.:4.000 3rd Qu.:3.000
## Max. :4.000 Max. :4.000
## Mjob Fjob reason guardian
## Length:395 Length:395 Length:395 Length:395
## Class :character Class :character Class :character Class :character
## Mode :character Mode :character Mode :character Mode :character
##
##
##
## traveltime studytime failures schoolsup
## Min. :1.000 Min. :1.000 Min. :0.0000 Length:395
## 1st Qu.:1.000 1st Qu.:1.000 1st Qu.:0.0000 Class :character
## Median :1.000 Median :2.000 Median :0.0000 Mode :character
## Mean :1.448 Mean :2.035 Mean :0.3342
## 3rd Qu.:2.000 3rd Qu.:2.000 3rd Qu.:0.0000
## Max. :4.000 Max. :4.000 Max. :3.0000
## famsup paid activities nursery
## Length:395 Length:395 Length:395 Length:395
## Class :character Class :character Class :character Class :character
## Mode :character Mode :character Mode :character Mode :character
##
##
##
## higher internet romantic famrel
## Length:395 Length:395 Length:395 Min. :1.000
## Class :character Class :character Class :character 1st Qu.:4.000
## Mode :character Mode :character Mode :character Median :4.000
## Mean :3.944
## 3rd Qu.:5.000
## Max. :5.000
## freetime goout Dalc Walc
## Min. :1.000 Min. :1.000 Min. :1.000 Min. :1.000
## 1st Qu.:3.000 1st Qu.:2.000 1st Qu.:1.000 1st Qu.:1.000
## Median :3.000 Median :3.000 Median :1.000 Median :2.000
## Mean :3.235 Mean :3.109 Mean :1.481 Mean :2.291
## 3rd Qu.:4.000 3rd Qu.:4.000 3rd Qu.:2.000 3rd Qu.:3.000
## Max. :5.000 Max. :5.000 Max. :5.000 Max. :5.000
## health absences G1 G2
## Min. :1.000 Min. : 0.000 Min. : 3.00 Min. : 0.00
## 1st Qu.:3.000 1st Qu.: 0.000 1st Qu.: 8.00 1st Qu.: 9.00
## Median :4.000 Median : 4.000 Median :11.00 Median :11.00
## Mean :3.554 Mean : 5.709 Mean :10.91 Mean :10.71
## 3rd Qu.:5.000 3rd Qu.: 8.000 3rd Qu.:13.00 3rd Qu.:13.00
## Max. :5.000 Max. :75.000 Max. :19.00 Max. :19.00
## G3
## Min. : 0.00
## 1st Qu.: 8.00
## Median :11.00
## Mean :10.42
## 3rd Qu.:14.00
## Max. :20.00
summary(student_por)
## school sex age address
## Length:649 Length:649 Min. :15.00 Length:649
## Class :character Class :character 1st Qu.:16.00 Class :character
## Mode :character Mode :character Median :17.00 Mode :character
## Mean :16.74
## 3rd Qu.:18.00
## Max. :22.00
## famsize Pstatus Medu Fedu
## Length:649 Length:649 Min. :0.000 Min. :0.000
## Class :character Class :character 1st Qu.:2.000 1st Qu.:1.000
## Mode :character Mode :character Median :2.000 Median :2.000
## Mean :2.515 Mean :2.307
## 3rd Qu.:4.000 3rd Qu.:3.000
## Max. :4.000 Max. :4.000
## Mjob Fjob reason guardian
## Length:649 Length:649 Length:649 Length:649
## Class :character Class :character Class :character Class :character
## Mode :character Mode :character Mode :character Mode :character
##
##
##
## traveltime studytime failures schoolsup
## Min. :1.000 Min. :1.000 Min. :0.0000 Length:649
## 1st Qu.:1.000 1st Qu.:1.000 1st Qu.:0.0000 Class :character
## Median :1.000 Median :2.000 Median :0.0000 Mode :character
## Mean :1.569 Mean :1.931 Mean :0.2219
## 3rd Qu.:2.000 3rd Qu.:2.000 3rd Qu.:0.0000
## Max. :4.000 Max. :4.000 Max. :3.0000
## famsup paid activities nursery
## Length:649 Length:649 Length:649 Length:649
## Class :character Class :character Class :character Class :character
## Mode :character Mode :character Mode :character Mode :character
##
##
##
## higher internet romantic famrel
## Length:649 Length:649 Length:649 Min. :1.000
## Class :character Class :character Class :character 1st Qu.:4.000
## Mode :character Mode :character Mode :character Median :4.000
## Mean :3.931
## 3rd Qu.:5.000
## Max. :5.000
## freetime goout Dalc Walc health
## Min. :1.00 Min. :1.000 Min. :1.000 Min. :1.00 Min. :1.000
## 1st Qu.:3.00 1st Qu.:2.000 1st Qu.:1.000 1st Qu.:1.00 1st Qu.:2.000
## Median :3.00 Median :3.000 Median :1.000 Median :2.00 Median :4.000
## Mean :3.18 Mean :3.185 Mean :1.502 Mean :2.28 Mean :3.536
## 3rd Qu.:4.00 3rd Qu.:4.000 3rd Qu.:2.000 3rd Qu.:3.00 3rd Qu.:5.000
## Max. :5.00 Max. :5.000 Max. :5.000 Max. :5.00 Max. :5.000
## absences G1 G2 G3
## Min. : 0.000 Min. : 0.0 Min. : 0.00 Min. : 0.00
## 1st Qu.: 0.000 1st Qu.:10.0 1st Qu.:10.00 1st Qu.:10.00
## Median : 2.000 Median :11.0 Median :11.00 Median :12.00
## Mean : 3.659 Mean :11.4 Mean :11.57 Mean :11.91
## 3rd Qu.: 6.000 3rd Qu.:13.0 3rd Qu.:13.00 3rd Qu.:14.00
## Max. :32.000 Max. :19.0 Max. :19.00 Max. :19.00
#column names are printed in console
colnames(student_mat)
## [1] "school" "sex" "age" "address" "famsize"
## [6] "Pstatus" "Medu" "Fedu" "Mjob" "Fjob"
## [11] "reason" "guardian" "traveltime" "studytime" "failures"
## [16] "schoolsup" "famsup" "paid" "activities" "nursery"
## [21] "higher" "internet" "romantic" "famrel" "freetime"
## [26] "goout" "Dalc" "Walc" "health" "absences"
## [31] "G1" "G2" "G3"
colnames(student_por)
## [1] "school" "sex" "age" "address" "famsize"
## [6] "Pstatus" "Medu" "Fedu" "Mjob" "Fjob"
## [11] "reason" "guardian" "traveltime" "studytime" "failures"
## [16] "schoolsup" "famsup" "paid" "activities" "nursery"
## [21] "higher" "internet" "romantic" "famrel" "freetime"
## [26] "goout" "Dalc" "Walc" "health" "absences"
## [31] "G1" "G2" "G3"
#to show the number of observations based on their school name
table(student_mat$school)
##
## GP MS
## 349 46
table(student_por$school)
##
## GP MS
## 423 226
Merge the two datasets using the dplyr library
# load library
library(dplyr)
# make a list of common columns to be used as identifiers in both datasets when merging
join_by_columns <- c("school","sex","age","address","famsize","Pstatus","Medu",
"Fedu","Mjob","Fjob","reason","nursery","internet")
# join the two datasets by inner join function of the dplyr library, using the selected identifiers
# Note: inner_join keeps only rows (observations) that are in both input datasets
students_math_por_joint <- inner_join(student_mat, student_por,
by = join_by_columns, suffix = c("_math", "_por"))
#check dimensions of the newly merged dataset
dim(students_math_por_joint) # 382 students
## [1] 382 53
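A toy illustration of inner_join semantics (made-up data, just to show that non-matching keys are dropped):
df_a <- data.frame(id = 1:3, x = c("a", "b", "c"))
df_b <- data.frame(id = 2:4, y = c("B", "C", "D"))
inner_join(df_a, df_b, by = "id") # only ids 2 and 3 survive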
# see the new column names
colnames(students_math_por_joint)
## [1] "school" "sex" "age" "address"
## [5] "famsize" "Pstatus" "Medu" "Fedu"
## [9] "Mjob" "Fjob" "reason" "guardian_math"
## [13] "traveltime_math" "studytime_math" "failures_math" "schoolsup_math"
## [17] "famsup_math" "paid_math" "activities_math" "nursery"
## [21] "higher_math" "internet" "romantic_math" "famrel_math"
## [25] "freetime_math" "goout_math" "Dalc_math" "Walc_math"
## [29] "health_math" "absences_math" "G1_math" "G2_math"
## [33] "G3_math" "guardian_por" "traveltime_por" "studytime_por"
## [37] "failures_por" "schoolsup_por" "famsup_por" "paid_por"
## [41] "activities_por" "higher_por" "romantic_por" "famrel_por"
## [45] "freetime_por" "goout_por" "Dalc_por" "Walc_por"
## [49] "health_por" "absences_por" "G1_por" "G2_por"
## [53] "G3_por"
# view the file in R
#View(students_math_por_joint)
# glimpse at the data; it is like str() but prints more of the data. In other words, like a transposed version of print()
glimpse(students_math_por_joint)
## Rows: 382
## Columns: 53
## $ school <chr> "GP", "GP", "GP", "GP", "GP", "GP", "GP", "GP", "GP...
## $ sex <chr> "F", "F", "F", "F", "F", "M", "M", "F", "M", "M", "...
## $ age <int> 18, 17, 15, 15, 16, 16, 16, 17, 15, 15, 15, 15, 15,...
## $ address <chr> "U", "U", "U", "U", "U", "U", "U", "U", "U", "U", "...
## $ famsize <chr> "GT3", "GT3", "LE3", "GT3", "GT3", "LE3", "LE3", "G...
## $ Pstatus <chr> "A", "T", "T", "T", "T", "T", "T", "A", "A", "T", "...
## $ Medu <int> 4, 1, 1, 4, 3, 4, 2, 4, 3, 3, 4, 2, 4, 4, 2, 4, 4, ...
## $ Fedu <int> 4, 1, 1, 2, 3, 3, 2, 4, 2, 4, 4, 1, 4, 3, 2, 4, 4, ...
## $ Mjob <chr> "at_home", "at_home", "at_home", "health", "other",...
## $ Fjob <chr> "teacher", "other", "other", "services", "other", "...
## $ reason <chr> "course", "course", "other", "home", "home", "reput...
## $ guardian_math <chr> "mother", "father", "mother", "mother", "father", "...
## $ traveltime_math <int> 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 3, 1, 2, 1, 1, 1, ...
## $ studytime_math <int> 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 3, 1, 2, 3, 1, 3, ...
## $ failures_math <int> 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
## $ schoolsup_math <chr> "yes", "no", "yes", "no", "no", "no", "no", "yes", ...
## $ famsup_math <chr> "no", "yes", "no", "yes", "yes", "yes", "no", "yes"...
## $ paid_math <chr> "no", "no", "yes", "yes", "yes", "yes", "no", "no",...
## $ activities_math <chr> "no", "no", "no", "yes", "no", "yes", "no", "no", "...
## $ nursery <chr> "yes", "no", "yes", "yes", "yes", "yes", "yes", "ye...
## $ higher_math <chr> "yes", "yes", "yes", "yes", "yes", "yes", "yes", "y...
## $ internet <chr> "no", "yes", "yes", "yes", "no", "yes", "yes", "no"...
## $ romantic_math <chr> "no", "no", "no", "yes", "no", "no", "no", "no", "n...
## $ famrel_math <int> 4, 5, 4, 3, 4, 5, 4, 4, 4, 5, 3, 5, 4, 5, 4, 4, 3, ...
## $ freetime_math <int> 3, 3, 3, 2, 3, 4, 4, 1, 2, 5, 3, 2, 3, 4, 5, 4, 2, ...
## $ goout_math <int> 4, 3, 2, 2, 2, 2, 4, 4, 2, 1, 3, 2, 3, 3, 2, 4, 3, ...
## $ Dalc_math <int> 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
## $ Walc_math <int> 1, 1, 3, 1, 2, 2, 1, 1, 1, 1, 2, 1, 3, 2, 1, 2, 2, ...
## $ health_math <int> 3, 3, 3, 5, 5, 5, 3, 1, 1, 5, 2, 4, 5, 3, 3, 2, 2, ...
## $ absences_math <int> 6, 4, 10, 2, 4, 10, 0, 6, 0, 0, 0, 4, 2, 2, 0, 4, 6...
## $ G1_math <int> 5, 5, 7, 15, 6, 15, 12, 6, 16, 14, 10, 10, 14, 10, ...
## $ G2_math <int> 6, 5, 8, 14, 10, 15, 12, 5, 18, 15, 8, 12, 14, 10, ...
## $ G3_math <int> 6, 6, 10, 15, 10, 15, 11, 6, 19, 15, 9, 12, 14, 11,...
## $ guardian_por <chr> "mother", "father", "mother", "mother", "father", "...
## $ traveltime_por <int> 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 3, 1, 2, 1, 1, 1, ...
## $ studytime_por <int> 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 3, 1, 2, 3, 1, 3, ...
## $ failures_por <int> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...
## $ schoolsup_por <chr> "yes", "no", "yes", "no", "no", "no", "no", "yes", ...
## $ famsup_por <chr> "no", "yes", "no", "yes", "yes", "yes", "no", "yes"...
## $ paid_por <chr> "no", "no", "no", "no", "no", "no", "no", "no", "no...
## $ activities_por <chr> "no", "no", "no", "yes", "no", "yes", "no", "no", "...
## $ higher_por <chr> "yes", "yes", "yes", "yes", "yes", "yes", "yes", "y...
## $ romantic_por <chr> "no", "no", "no", "yes", "no", "no", "no", "no", "n...
## $ famrel_por <int> 4, 5, 4, 3, 4, 5, 4, 4, 4, 5, 3, 5, 4, 5, 4, 4, 3, ...
## $ freetime_por <int> 3, 3, 3, 2, 3, 4, 4, 1, 2, 5, 3, 2, 3, 4, 5, 4, 2, ...
## $ goout_por <int> 4, 3, 2, 2, 2, 2, 4, 4, 2, 1, 3, 2, 3, 3, 2, 4, 3, ...
## $ Dalc_por <int> 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
## $ Walc_por <int> 1, 1, 3, 1, 2, 2, 1, 1, 1, 1, 2, 1, 3, 2, 1, 2, 2, ...
## $ health_por <int> 3, 3, 3, 5, 5, 5, 3, 1, 1, 5, 2, 4, 5, 3, 3, 2, 2, ...
## $ absences_por <int> 4, 2, 6, 0, 0, 6, 0, 2, 0, 0, 2, 0, 0, 0, 0, 6, 10,...
## $ G1_por <int> 0, 9, 12, 14, 11, 12, 13, 10, 15, 12, 14, 10, 12, 1...
## $ G2_por <int> 11, 11, 13, 14, 13, 12, 12, 13, 16, 12, 14, 12, 13,...
## $ G3_por <int> 11, 11, 12, 14, 13, 13, 13, 13, 17, 13, 14, 13, 12,...
# check data structure
str(students_math_por_joint)
## 'data.frame': 382 obs. of 53 variables:
## $ school : chr "GP" "GP" "GP" "GP" ...
## $ sex : chr "F" "F" "F" "F" ...
## $ age : int 18 17 15 15 16 16 16 17 15 15 ...
## $ address : chr "U" "U" "U" "U" ...
## $ famsize : chr "GT3" "GT3" "LE3" "GT3" ...
## $ Pstatus : chr "A" "T" "T" "T" ...
## $ Medu : int 4 1 1 4 3 4 2 4 3 3 ...
## $ Fedu : int 4 1 1 2 3 3 2 4 2 4 ...
## $ Mjob : chr "at_home" "at_home" "at_home" "health" ...
## $ Fjob : chr "teacher" "other" "other" "services" ...
## $ reason : chr "course" "course" "other" "home" ...
## $ guardian_math : chr "mother" "father" "mother" "mother" ...
## $ traveltime_math: int 2 1 1 1 1 1 1 2 1 1 ...
## $ studytime_math : int 2 2 2 3 2 2 2 2 2 2 ...
## $ failures_math : int 0 0 3 0 0 0 0 0 0 0 ...
## $ schoolsup_math : chr "yes" "no" "yes" "no" ...
## $ famsup_math : chr "no" "yes" "no" "yes" ...
## $ paid_math : chr "no" "no" "yes" "yes" ...
## $ activities_math: chr "no" "no" "no" "yes" ...
## $ nursery : chr "yes" "no" "yes" "yes" ...
## $ higher_math : chr "yes" "yes" "yes" "yes" ...
## $ internet : chr "no" "yes" "yes" "yes" ...
## $ romantic_math : chr "no" "no" "no" "yes" ...
## $ famrel_math : int 4 5 4 3 4 5 4 4 4 5 ...
## $ freetime_math : int 3 3 3 2 3 4 4 1 2 5 ...
## $ goout_math : int 4 3 2 2 2 2 4 4 2 1 ...
## $ Dalc_math : int 1 1 2 1 1 1 1 1 1 1 ...
## $ Walc_math : int 1 1 3 1 2 2 1 1 1 1 ...
## $ health_math : int 3 3 3 5 5 5 3 1 1 5 ...
## $ absences_math : int 6 4 10 2 4 10 0 6 0 0 ...
## $ G1_math : int 5 5 7 15 6 15 12 6 16 14 ...
## $ G2_math : int 6 5 8 14 10 15 12 5 18 15 ...
## $ G3_math : int 6 6 10 15 10 15 11 6 19 15 ...
## $ guardian_por : chr "mother" "father" "mother" "mother" ...
## $ traveltime_por : int 2 1 1 1 1 1 1 2 1 1 ...
## $ studytime_por : int 2 2 2 3 2 2 2 2 2 2 ...
## $ failures_por : int 0 0 0 0 0 0 0 0 0 0 ...
## $ schoolsup_por : chr "yes" "no" "yes" "no" ...
## $ famsup_por : chr "no" "yes" "no" "yes" ...
## $ paid_por : chr "no" "no" "no" "no" ...
## $ activities_por : chr "no" "no" "no" "yes" ...
## $ higher_por : chr "yes" "yes" "yes" "yes" ...
## $ romantic_por : chr "no" "no" "no" "yes" ...
## $ famrel_por : int 4 5 4 3 4 5 4 4 4 5 ...
## $ freetime_por : int 3 3 3 2 3 4 4 1 2 5 ...
## $ goout_por : int 4 3 2 2 2 2 4 4 2 1 ...
## $ Dalc_por : int 1 1 2 1 1 1 1 1 1 1 ...
## $ Walc_por : int 1 1 3 1 2 2 1 1 1 1 ...
## $ health_por : int 3 3 3 5 5 5 3 1 1 5 ...
## $ absences_por : int 4 2 6 0 0 6 0 2 0 0 ...
## $ G1_por : int 0 9 12 14 11 12 13 10 15 12 ...
## $ G2_por : int 11 11 13 14 13 12 12 13 16 12 ...
## $ G3_por : int 11 11 12 14 13 13 13 13 17 13 ...
Combine the ‘duplicated’ answers in the joined data
# create a new data frame with only the joined columns
alc <- select(students_math_por_joint, one_of(join_by_columns))
# columns that were not used for joining the data
notjoined_columns <- colnames(student_mat)[!colnames(student_mat) %in% join_by_columns]
# print out the columns not used for joining
notjoined_columns
## [1] "guardian" "traveltime" "studytime" "failures" "schoolsup"
## [6] "famsup" "paid" "activities" "higher" "romantic"
## [11] "famrel" "freetime" "goout" "Dalc" "Walc"
## [16] "health" "absences" "G1" "G2" "G3"
# for every column name not used for joining...
for(column_name in notjoined_columns) {
# print the column name while looping to show the progress
print(column_name)
# select the two columns from 'students_math_por_joint' with the same original name
two_columns <- select(students_math_por_joint, starts_with(column_name))
# select the first column vector of those two columns
first_column <- select(two_columns, 1)[[1]]
# if that first column vector is numeric...
if(is.numeric(first_column)) {
# take a rounded average of each row of the two columns and
# add the resulting vector to the alc data frame
alc[column_name] <- round(rowMeans(two_columns))
} else { # else if it's not numeric...
# add the first column vector to the alc data frame
alc[column_name] <- first_column
}
}
## [1] "guardian"
## [1] "traveltime"
## [1] "studytime"
## [1] "failures"
## [1] "schoolsup"
## [1] "famsup"
## [1] "paid"
## [1] "activities"
## [1] "higher"
## [1] "romantic"
## [1] "famrel"
## [1] "freetime"
## [1] "goout"
## [1] "Dalc"
## [1] "Walc"
## [1] "health"
## [1] "absences"
## [1] "G1"
## [1] "G2"
## [1] "G3"
# glimpse at the new combined data
glimpse(alc)
## Rows: 382
## Columns: 33
## $ school <chr> "GP", "GP", "GP", "GP", "GP", "GP", "GP", "GP", "GP", "G...
## $ sex <chr> "F", "F", "F", "F", "F", "M", "M", "F", "M", "M", "F", "...
## $ age <int> 18, 17, 15, 15, 16, 16, 16, 17, 15, 15, 15, 15, 15, 15, ...
## $ address <chr> "U", "U", "U", "U", "U", "U", "U", "U", "U", "U", "U", "...
## $ famsize <chr> "GT3", "GT3", "LE3", "GT3", "GT3", "LE3", "LE3", "GT3", ...
## $ Pstatus <chr> "A", "T", "T", "T", "T", "T", "T", "A", "A", "T", "T", "...
## $ Medu <int> 4, 1, 1, 4, 3, 4, 2, 4, 3, 3, 4, 2, 4, 4, 2, 4, 4, 3, 3,...
## $ Fedu <int> 4, 1, 1, 2, 3, 3, 2, 4, 2, 4, 4, 1, 4, 3, 2, 4, 4, 3, 2,...
## $ Mjob <chr> "at_home", "at_home", "at_home", "health", "other", "ser...
## $ Fjob <chr> "teacher", "other", "other", "services", "other", "other...
## $ reason <chr> "course", "course", "other", "home", "home", "reputation...
## $ nursery <chr> "yes", "no", "yes", "yes", "yes", "yes", "yes", "yes", "...
## $ internet <chr> "no", "yes", "yes", "yes", "no", "yes", "yes", "no", "ye...
## $ guardian <chr> "mother", "father", "mother", "mother", "father", "mothe...
## $ traveltime <dbl> 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 3, 1, 2, 1, 1, 1, 3, 1,...
## $ studytime <dbl> 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 3, 1, 2, 3, 1, 3, 2, 1,...
## $ failures <dbl> 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,...
## $ schoolsup <chr> "yes", "no", "yes", "no", "no", "no", "no", "yes", "no",...
## $ famsup <chr> "no", "yes", "no", "yes", "yes", "yes", "no", "yes", "ye...
## $ paid <chr> "no", "no", "yes", "yes", "yes", "yes", "no", "no", "yes...
## $ activities <chr> "no", "no", "no", "yes", "no", "yes", "no", "no", "no", ...
## $ higher <chr> "yes", "yes", "yes", "yes", "yes", "yes", "yes", "yes", ...
## $ romantic <chr> "no", "no", "no", "yes", "no", "no", "no", "no", "no", "...
## $ famrel <dbl> 4, 5, 4, 3, 4, 5, 4, 4, 4, 5, 3, 5, 4, 5, 4, 4, 3, 5, 5,...
## $ freetime <dbl> 3, 3, 3, 2, 3, 4, 4, 1, 2, 5, 3, 2, 3, 4, 5, 4, 2, 3, 5,...
## $ goout <dbl> 4, 3, 2, 2, 2, 2, 4, 4, 2, 1, 3, 2, 3, 3, 2, 4, 3, 2, 5,...
## $ Dalc <dbl> 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,...
## $ Walc <dbl> 1, 1, 3, 1, 2, 2, 1, 1, 1, 1, 2, 1, 3, 2, 1, 2, 2, 1, 4,...
## $ health <dbl> 3, 3, 3, 5, 5, 5, 3, 1, 1, 5, 2, 4, 5, 3, 3, 2, 2, 4, 5,...
## $ absences <dbl> 5, 3, 8, 1, 2, 8, 0, 4, 0, 0, 1, 2, 1, 1, 0, 5, 8, 3, 9,...
## $ G1 <dbl> 2, 7, 10, 14, 8, 14, 12, 8, 16, 13, 12, 10, 13, 11, 14, ...
## $ G2 <dbl> 8, 8, 10, 14, 12, 14, 12, 9, 17, 14, 11, 12, 14, 11, 15,...
## $ G3 <dbl> 8, 8, 11, 14, 12, 14, 12, 10, 18, 14, 12, 12, 13, 12, 16...
Define a new column alc_use as the average of weekday and weekend alcohol use (i.e. columns Dalc and Walc, respectively)
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
# load library for plotting
library(ggplot2)
# Let's plot the alcohol use based on gender
g1 <- ggplot(data = alc, aes(x = alc_use, fill = sex))
# define the plot as a bar plot and draw it
g1 + geom_bar()
# create a new logical column named 'high_use': TRUE if consumption (alc_use) is > 2, FALSE otherwise
alc <- mutate(alc, high_use = alc_use > 2)
# initialize a plot of 'high_use'
g2 <- ggplot(alc, aes(high_use))
# draw a bar plot of high_use by sex
g2 + facet_wrap("sex") + geom_bar()
table(alc$high_use)
##
## FALSE TRUE
## 268 114
"as it shows 114 students were consumed alcohol more than threshold (2) so were high use"
## [1] "as it shows 114 students were consumed alcohol more than threshold (2) so were high use"
table(alc$high_use)/nrow(alc)*100
##
## FALSE TRUE
## 70.15707 29.84293
"It means that 29.84% of students were high use, while majority (70.15%) were low use"
## [1] "It means that 29.84% of students were high use, while majority (70.15%) were low use"
table(alc$alc_use)/nrow(alc)*100
##
## 1 1.5 2 2.5 3 3.5 4
## 36.6492147 18.0628272 15.4450262 11.5183246 8.3769634 4.4502618 2.3560209
## 4.5 5
## 0.7853403 2.3560209
"as this shows, majoriy of them (36.65%) consumed just 1"
## [1] "as this shows, majoriy of them (36.65%) consumed just 1"
# Let's make a histogram and check the distribution
hist(alc$alc_use)
# Let's make a boxplot to further see the distribution
boxplot(alc$alc_use)
# read libraries
library(tidyr); library(dplyr); library(ggplot2)
# glimpse at the alc data
glimpse(alc)
## Rows: 382
## Columns: 35
## $ school <chr> "GP", "GP", "GP", "GP", "GP", "GP", "GP", "GP", "GP", "G...
## $ sex <chr> "F", "F", "F", "F", "F", "M", "M", "F", "M", "M", "F", "...
## $ age <int> 18, 17, 15, 15, 16, 16, 16, 17, 15, 15, 15, 15, 15, 15, ...
## $ address <chr> "U", "U", "U", "U", "U", "U", "U", "U", "U", "U", "U", "...
## $ famsize <chr> "GT3", "GT3", "LE3", "GT3", "GT3", "LE3", "LE3", "GT3", ...
## $ Pstatus <chr> "A", "T", "T", "T", "T", "T", "T", "A", "A", "T", "T", "...
## $ Medu <int> 4, 1, 1, 4, 3, 4, 2, 4, 3, 3, 4, 2, 4, 4, 2, 4, 4, 3, 3,...
## $ Fedu <int> 4, 1, 1, 2, 3, 3, 2, 4, 2, 4, 4, 1, 4, 3, 2, 4, 4, 3, 2,...
## $ Mjob <chr> "at_home", "at_home", "at_home", "health", "other", "ser...
## $ Fjob <chr> "teacher", "other", "other", "services", "other", "other...
## $ reason <chr> "course", "course", "other", "home", "home", "reputation...
## $ nursery <chr> "yes", "no", "yes", "yes", "yes", "yes", "yes", "yes", "...
## $ internet <chr> "no", "yes", "yes", "yes", "no", "yes", "yes", "no", "ye...
## $ guardian <chr> "mother", "father", "mother", "mother", "father", "mothe...
## $ traveltime <dbl> 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 3, 1, 2, 1, 1, 1, 3, 1,...
## $ studytime <dbl> 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 3, 1, 2, 3, 1, 3, 2, 1,...
## $ failures <dbl> 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,...
## $ schoolsup <chr> "yes", "no", "yes", "no", "no", "no", "no", "yes", "no",...
## $ famsup <chr> "no", "yes", "no", "yes", "yes", "yes", "no", "yes", "ye...
## $ paid <chr> "no", "no", "yes", "yes", "yes", "yes", "no", "no", "yes...
## $ activities <chr> "no", "no", "no", "yes", "no", "yes", "no", "no", "no", ...
## $ higher <chr> "yes", "yes", "yes", "yes", "yes", "yes", "yes", "yes", ...
## $ romantic <chr> "no", "no", "no", "yes", "no", "no", "no", "no", "no", "...
## $ famrel <dbl> 4, 5, 4, 3, 4, 5, 4, 4, 4, 5, 3, 5, 4, 5, 4, 4, 3, 5, 5,...
## $ freetime <dbl> 3, 3, 3, 2, 3, 4, 4, 1, 2, 5, 3, 2, 3, 4, 5, 4, 2, 3, 5,...
## $ goout <dbl> 4, 3, 2, 2, 2, 2, 4, 4, 2, 1, 3, 2, 3, 3, 2, 4, 3, 2, 5,...
## $ Dalc <dbl> 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,...
## $ Walc <dbl> 1, 1, 3, 1, 2, 2, 1, 1, 1, 1, 2, 1, 3, 2, 1, 2, 2, 1, 4,...
## $ health <dbl> 3, 3, 3, 5, 5, 5, 3, 1, 1, 5, 2, 4, 5, 3, 3, 2, 2, 4, 5,...
## $ absences <dbl> 5, 3, 8, 1, 2, 8, 0, 4, 0, 0, 1, 2, 1, 1, 0, 5, 8, 3, 9,...
## $ G1 <dbl> 2, 7, 10, 14, 8, 14, 12, 8, 16, 13, 12, 10, 13, 11, 14, ...
## $ G2 <dbl> 8, 8, 10, 14, 12, 14, 12, 9, 17, 14, 11, 12, 14, 11, 15,...
## $ G3 <dbl> 8, 8, 11, 14, 12, 14, 12, 10, 18, 14, 12, 12, 13, 12, 16...
## $ alc_use <dbl> 1.0, 1.0, 2.5, 1.0, 1.5, 1.5, 1.0, 1.0, 1.0, 1.0, 1.5, 1...
## $ high_use <lgl> FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, F...
# use gather() to gather columns into key-value pairs and then glimpse() at the resulting data
gather(alc) %>% glimpse
## Rows: 13,370
## Columns: 2
## $ key <chr> "school", "school", "school", "school", "school", "school", "...
## $ value <chr> "GP", "GP", "GP", "GP", "GP", "GP", "GP", "GP", "GP", "GP", "...
# draw a bar plot of each variable
gather(alc) %>% ggplot(aes(value)) + facet_wrap("key", scales = "free") + geom_bar()
# produce summary statistics by group
alc %>% group_by(sex, high_use) %>% summarise(count = n(), mean_grade = mean(G3))
## `summarise()` regrouping output by 'sex' (override with `.groups` argument)
## # A tibble: 4 x 4
## # Groups: sex [2]
## sex high_use count mean_grade
## <chr> <lgl> <int> <dbl>
## 1 F FALSE 156 11.4
## 2 F TRUE 42 11.7
## 3 M FALSE 112 12.2
## 4 M TRUE 72 10.3
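The regrouping message above can be silenced by stating the grouping behaviour explicitly (an option available in dplyr >= 1.0):
alc %>% group_by(sex, high_use) %>% summarise(count = n(), mean_grade = mean(G3), .groups = "drop")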
Are you interested in creating more plots, for example boxplots per group?
# initialize a plot of high_use and G3
g1 <- ggplot(alc, aes(x = high_use, y = G3, col = sex))
# define the plot as a boxplot and draw it
g1 + geom_boxplot() + ylab("grade")
"So, as plots show, those who consumed alcohol highly, their grade reduced especially for males"
## [1] "So, as plots show, those who consumed alcohol highly, their grade reduced especially for males"
# initialise a plot of high_use and absences
g2 <- ggplot(alc, aes(x = high_use, y = absences, col = sex))
# define the plot as a boxplot and draw it
g2 + geom_boxplot() + ggtitle("Student absences by alcohol consumption and sex")
"As plots show, those who consumed alcohol highly, their absence were higher especially in males"
## [1] "As plots show, those who consumed alcohol highly, their absence were higher especially in males"
dim(alc)
## [1] 382 35
# Save files to your computer directory
write.csv(alc, "alc_joinet.csv")
write.csv(students_math_por_joint, "students_math_por_joint.csv")
Now that we have prepared the data, let's move on to analysing it.
# read the data from your computer drive
data_student_perf_alc <- read.csv("alc_joinet.csv")
#Alternatively you can read from the URL:
data_student_perf_alc <- read.table("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/alc.txt", sep=",", header=TRUE)
Data description:
These data describe student achievement in secondary education at two Portuguese schools. The data attributes include student grades and demographic, social and school-related features, and were collected using school reports and questionnaires. Two datasets are provided regarding performance in two distinct subjects: Mathematics (mat) and Portuguese language (por). We joined the two datasets and are now ready for the analysis phase.
Thus, we will use this dataset to analyse the relationships between high/low alcohol consumption and the other variables. Is there any relationship between students' performance and alcohol? Logistic regression will be used. ___For more info visit: https://archive.ics.uci.edu/ml/datasets/Student+Performance___
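As a preview of that analysis, a minimal logistic-regression sketch with the variables chosen below (illustrative only, not the final model):
m <- glm(high_use ~ internet + goout + absences + activities, data = data_student_perf_alc, family = "binomial")
summary(m)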
# Variables names are as following
colnames(data_student_perf_alc)
## [1] "school" "sex" "age" "address" "famsize"
## [6] "Pstatus" "Medu" "Fedu" "Mjob" "Fjob"
## [11] "reason" "nursery" "internet" "guardian" "traveltime"
## [16] "studytime" "failures" "schoolsup" "famsup" "paid"
## [21] "activities" "higher" "romantic" "famrel" "freetime"
## [26] "goout" "Dalc" "Walc" "health" "absences"
## [31] "G1" "G2" "G3" "alc_use" "high_use"
I would like to select 4 interesting variables to start modelling. My hypotheses are as follows:
1. internet: students with Internet access at home will consume less alcohol
2. goout: students who go out with friends more will consume more alcohol
3. absences: students who are often absent from class consume more alcohol
4. activities: students with more extra-curricular activities will consume less alcohol
# select only those columns
variables <- c("internet", "goout", "absences", "activities",
"alc_use", "high_use")
dt_some_col <- select(data_student_perf_alc, variables)
## Note: Using an external vector in selections is ambiguous.
## i Use `all_of(variables)` instead of `variables` to silence this message.
## i See <https://tidyselect.r-lib.org/reference/faq-external-vector.html>.
## This message is displayed once per session.
dim(dt_some_col)
## [1] 382 6
Draw a bar plot of each variable
gather(dt_some_col) %>% ggplot(aes(value)) + facet_wrap("key", scales = "free") + geom_bar()
# see the proportional table
table(dt_some_col$absences)/nrow(dt_some_col)*100
##
## 0 1 2 3 4 5 6
## 30.3664921 0.7853403 17.5392670 1.5706806 13.3507853 1.0471204 7.8534031
## 7 8 9 10 11 12 13
## 2.0942408 5.4973822 0.7853403 4.4502618 0.5235602 2.6178010 0.7853403
## 14 15 16 17 18 19 20
## 2.8795812 0.7853403 1.5706806 0.2617801 1.0471204 0.2617801 0.5235602
## 21 22 23 24 25 26 28
## 0.2617801 0.7853403 0.2617801 0.2617801 0.2617801 0.2617801 0.2617801
## 30 54 56 75
## 0.2617801 0.2617801 0.2617801 0.2617801
#data structure
str(dt_some_col)
## 'data.frame': 382 obs. of 6 variables:
## $ internet : chr "no" "yes" "yes" "yes" ...
## $ goout : int 4 3 2 2 2 2 4 4 2 1 ...
## $ absences : int 6 4 10 2 4 10 0 6 0 0 ...
## $ activities: chr "no" "no" "no" "yes" ...
## $ alc_use : num 1 1 2.5 1 1.5 1.5 1 1 1 1 ...
## $ high_use : logi FALSE FALSE TRUE FALSE FALSE FALSE ...
#Crosstable
cross_table = xtabs(~alc_use + internet, data = dt_some_col)
cross_table
## internet
## alc_use no yes
## 1 27 117
## 1.5 7 61
## 2 9 49
## 2.5 5 37
## 3 6 26
## 3.5 2 15
## 4 2 7
## 4.5 0 3
## 5 0 9
round(prop.table(cross_table), 2)
## internet
## alc_use no yes
## 1 0.07 0.31
## 1.5 0.02 0.16
## 2 0.02 0.13
## 2.5 0.01 0.10
## 3 0.02 0.07
## 3.5 0.01 0.04
## 4 0.01 0.02
## 4.5 0.00 0.01
## 5 0.00 0.02
# test whether the distribution differs significantly
chisq.test(cross_table)
## Warning in chisq.test(cross_table): Chi-squared approximation may be incorrect
##
## Pearson's Chi-squared test
##
## data: cross_table
## X-squared = 6.0051, df = 8, p-value = 0.6467
As the p-value > 0.05, the distribution of this variable (internet access at home) does not differ significantly.
Note: don’t worry about the warning message; it appears because some expected cell counts are small, which can make the chi-squared approximation unreliable.
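If you want to double-check, a small sketch (using the simulate.p.value argument of base R’s chisq.test) estimates the p-value by Monte Carlo simulation instead of the asymptotic approximation, which is safer with small counts:
# Monte Carlo p-value based on B simulated tables; avoids the small-count approximation
chisq.test(cross_table, simulate.p.value = TRUE, B = 10000)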
Now for another variable:
#Crosstable
cross_table = xtabs(~alc_use + goout, data = dt_some_col)
cross_table
## goout
## alc_use 1 2 3 4 5
## 1 16 49 45 25 9
## 1.5 3 22 29 8 6
## 2 2 13 26 10 7
## 2.5 2 7 13 16 4
## 3 0 4 3 14 11
## 3.5 1 2 4 3 7
## 4 0 0 1 5 3
## 4.5 0 1 0 1 1
## 5 0 1 2 0 6
# proportions of each cell, rounded to 2 decimals
round(prop.table(cross_table), 2)
## goout
## alc_use 1 2 3 4 5
## 1 0.04 0.13 0.12 0.07 0.02
## 1.5 0.01 0.06 0.08 0.02 0.02
## 2 0.01 0.03 0.07 0.03 0.02
## 2.5 0.01 0.02 0.03 0.04 0.01
## 3 0.00 0.01 0.01 0.04 0.03
## 3.5 0.00 0.01 0.01 0.01 0.02
## 4 0.00 0.00 0.00 0.01 0.01
## 4.5 0.00 0.00 0.00 0.00 0.00
## 5 0.00 0.00 0.01 0.00 0.02
# test whether the distribution differs significantly
chisq.test(cross_table)
## Warning in chisq.test(cross_table): Chi-squared approximation may be incorrect
##
## Pearson's Chi-squared test
##
## data: cross_table
## X-squared = 108.14, df = 32, p-value = 3.414e-10
As the p-value < 0.05, the distribution of this variable (going-out behaviour) differs significantly.
Note: again, don’t worry about the warning message; it appears because some expected cell counts are small.
Next, another variable:
#Crosstable
cross_table = xtabs(~alc_use + absences, data = dt_some_col)
cross_table
## absences
## alc_use 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
## 1 58 2 28 0 17 0 11 3 8 2 5 0 3 0 1 1 1 0 1 0 1 0 0 0
## 1.5 22 0 16 0 11 1 4 0 2 1 4 0 2 0 2 0 0 0 1 0 0 0 0 1
## 2 14 0 10 2 8 3 4 3 3 0 3 1 1 0 4 0 1 0 0 0 0 1 0 0
## 2.5 7 1 5 2 4 0 5 2 1 0 2 1 3 0 1 0 1 1 1 0 1 0 2 0
## 3 8 0 7 0 5 0 1 0 3 0 1 0 0 1 0 2 1 0 0 0 0 0 1 0
## 3.5 5 0 1 1 2 0 1 0 2 0 1 0 0 1 1 0 0 0 0 1 0 0 0 0
## 4 1 0 0 1 2 0 3 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0
## 4.5 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0
## 5 1 0 0 0 2 0 1 0 1 0 1 0 0 0 1 0 2 0 0 0 0 0 0 0
## absences
## alc_use 24 25 26 28 30 54 56 75
## 1 0 0 1 0 0 0 0 1
## 1.5 0 1 0 0 0 0 0 0
## 2 0 0 0 0 0 0 0 0
## 2.5 1 0 0 0 0 0 1 0
## 3 0 0 0 0 1 1 0 0
## 3.5 0 0 0 1 0 0 0 0
## 4 0 0 0 0 0 0 0 0
## 4.5 0 0 0 0 0 0 0 0
## 5 0 0 0 0 0 0 0 0
# proportions of each cell, rounded to 2 decimals
round(prop.table(cross_table), 2)
## absences
## alc_use 0 1 2 3 4 5 6 7 8 9 10 11 12 13
## 1 0.15 0.01 0.07 0.00 0.04 0.00 0.03 0.01 0.02 0.01 0.01 0.00 0.01 0.00
## 1.5 0.06 0.00 0.04 0.00 0.03 0.00 0.01 0.00 0.01 0.00 0.01 0.00 0.01 0.00
## 2 0.04 0.00 0.03 0.01 0.02 0.01 0.01 0.01 0.01 0.00 0.01 0.00 0.00 0.00
## 2.5 0.02 0.00 0.01 0.01 0.01 0.00 0.01 0.01 0.00 0.00 0.01 0.00 0.01 0.00
## 3 0.02 0.00 0.02 0.00 0.01 0.00 0.00 0.00 0.01 0.00 0.00 0.00 0.00 0.00
## 3.5 0.01 0.00 0.00 0.00 0.01 0.00 0.00 0.00 0.01 0.00 0.00 0.00 0.00 0.00
## 4 0.00 0.00 0.00 0.00 0.01 0.00 0.01 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## 4.5 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## 5 0.00 0.00 0.00 0.00 0.01 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## absences
## alc_use 14 15 16 17 18 19 20 21 22 23 24 25 26 28
## 1 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## 1.5 0.01 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## 2 0.01 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## 2.5 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.01 0.00 0.00 0.00 0.00 0.00
## 3 0.00 0.01 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## 3.5 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## 4 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## 4.5 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## 5 0.00 0.00 0.01 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
## absences
## alc_use 30 54 56 75
## 1 0.00 0.00 0.00 0.00
## 1.5 0.00 0.00 0.00 0.00
## 2 0.00 0.00 0.00 0.00
## 2.5 0.00 0.00 0.00 0.00
## 3 0.00 0.00 0.00 0.00
## 3.5 0.00 0.00 0.00 0.00
## 4 0.00 0.00 0.00 0.00
## 4.5 0.00 0.00 0.00 0.00
## 5 0.00 0.00 0.00 0.00
# test whether the distribution differs significantly
chisq.test(cross_table)
## Warning in chisq.test(cross_table): Chi-squared approximation may be incorrect
##
## Pearson's Chi-squared test
##
## data: cross_table
## X-squared = 348.17, df = 248, p-value = 2.751e-05
As the p-value < 0.05, the distribution of this variable (absences) differs significantly.
Note: again, the warning appears because some expected cell counts are small.
And for one more variable:
#Crosstable
cross_table = xtabs(~alc_use + activities, data = dt_some_col)
cross_table
## activities
## alc_use no yes
## 1 58 86
## 1.5 40 28
## 2 24 34
## 2.5 22 20
## 3 18 14
## 3.5 10 7
## 4 4 5
## 4.5 1 2
## 5 4 5
# proportions of each cell, rounded to 2 decimals
round(prop.table(cross_table), 2)
## activities
## alc_use no yes
## 1 0.15 0.23
## 1.5 0.10 0.07
## 2 0.06 0.09
## 2.5 0.06 0.05
## 3 0.05 0.04
## 3.5 0.03 0.02
## 4 0.01 0.01
## 4.5 0.00 0.01
## 5 0.01 0.01
# test whether the distribution differs significantly
chisq.test(cross_table)
## Warning in chisq.test(cross_table): Chi-squared approximation may be incorrect
##
## Pearson's Chi-squared test
##
## data: cross_table
## X-squared = 9.9466, df = 8, p-value = 0.2688
As the p-value > 0.05, the distribution of this variable (extra-curricular activities) does not differ significantly.
Note: as before, the warning appears because some expected cell counts are small.
Let’s draw bar plots of the variables of interest
barplot(sort(table(dt_some_col$internet),decreasing=T))
barplot(sort(table(dt_some_col$goout),decreasing=T))
barplot(sort(table(dt_some_col$absences),decreasing=T))
barplot(sort(table(dt_some_col$activities),decreasing=T))
barplot(sort(table(dt_some_col$alc_use),decreasing=T))
barplot(sort(table(dt_some_col$high_use),decreasing=T))
Let’s draw boxplots of the numerical variables of interest
boxplot(dt_some_col$goout)
boxplot(dt_some_col$absences)
boxplot(dt_some_col$alc_use)
And some more
plot(alc_use ~ goout, data=dt_some_col)
plot(alc_use ~ absences, data=dt_some_col)
Alternatively, this code also does the same: “table(dt_some_col$activities, dt_some_col$alc_use)”
## Apply logistic regression model
#set binary variables as factor
dt_some_col$internet <- factor(dt_some_col$internet)
dt_some_col$activities <- factor(dt_some_col$activities)
# make the model with glm()
regres_model <- glm(high_use ~ internet + goout + absences + activities, data = dt_some_col, family = "binomial")
# print out a summary of the model
summary(regres_model)
##
## Call:
## glm(formula = high_use ~ internet + goout + absences + activities,
## family = "binomial", data = dt_some_col)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.0141 -0.7760 -0.5282 0.9018 2.3876
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.47384 0.51472 -6.749 1.49e-11 ***
## internetyes -0.08489 0.35507 -0.239 0.811041
## goout 0.76811 0.11960 6.422 1.34e-10 ***
## absences 0.06175 0.01701 3.629 0.000284 ***
## activitiesyes -0.44819 0.25086 -1.787 0.074000 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 462.21 on 381 degrees of freedom
## Residual deviance: 393.79 on 377 degrees of freedom
## AIC: 403.79
##
## Number of Fisher Scoring iterations: 4
Based on the model summary, absences have a statistically significant relationship with high alcohol use (Pr(>|z|) = 0.000284), so this effect is significant at the 99% level.
Students who go out more consumed more alcohol; this relationship is highly significant (Pr(>|z|) = 1.34e-10), so our hypothesis is supported at the 99% level.
However, having Internet access at home showed no significant relationship with high alcohol use: its Pr(>|z|) is > 0.05, so the effect is not statistically significant (at least at the 95% level). Thus our hypothesis that students with Internet at home would consume less alcohol is rejected.
This matches the conclusion of the chisq.test above!
Notably, the relationship between extra-curricular activities and alcohol consumption is not significant at the 95% level, only at the 90% level (p = 0.074). So it shall be rejected, and we can drop this variable.
Hence, keeping only the significant terms, the fitted model is:
log-odds(high_use) = 0.76811 * goout + 0.06175 * absences - 3.47384, of the form y = a*x1 + b*x2 + c
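To turn these log-odds into a probability, here is a small sketch using base R’s plogis() (the inverse logit), with hypothetical example values goout = 5 and absences = 10:
# linear predictor (log-odds) for a hypothetical student: goout = 5, absences = 10
eta <- 0.76811 * 5 + 0.06175 * 10 - 3.47384
# inverse logit: exp(eta) / (1 + exp(eta)); roughly 0.73 here
plogis(eta)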
# print out the coefficients of the model
coef(regres_model)
## (Intercept) internetyes goout absences activitiesyes
## -3.47383750 -0.08489077 0.76810971 0.06174629 -0.44819206
# compute odds ratios (OR)
OR <- coef(regres_model) %>% exp
# compute confidence intervals (CI)
CI <- confint(regres_model) %>% exp
## Waiting for profiling to be done...
# bind odds ratios (OR) and confidence intervals (CI) columns together
cbind(OR, CI)
## OR 2.5 % 97.5 %
## (Intercept) 0.03099785 0.01084108 0.08194402
## internetyes 0.91861262 0.46457217 1.88377842
## goout 2.15568752 1.71547363 2.74450680
## absences 1.06369244 1.03125278 1.10286289
## activitiesyes 0.63878199 0.38885454 1.04178158
As shown, goout and absences have odds ratios above 1, meaning higher odds of high alcohol use per unit increase of the variable (note that an odds ratio is not a probability, but it plays a role similar to the coefficients of non-logistic regression models). Their confidence intervals are fairly narrow and do not include 1, which supports these effects.
# predict() the probability of high_use
probabilities <- predict(regres_model, type = "response")
# add the predicted probabilities to 'dt_some_col'
dt_some_col <- mutate(dt_some_col, probability = probabilities)
# use the probabilities to make a prediction of high_use
dt_some_col <- mutate(dt_some_col, prediction = probability > 0.5)
# see the last ten original classes, predicted probabilities, and class predictions
select(dt_some_col, internet, goout, absences, activities, probability, prediction) %>% tail(10)
## internet goout absences activities probability prediction
## 373 no 2 0 no 0.12590977 FALSE
## 374 no 3 14 no 0.42432092 FALSE
## 375 no 3 2 no 0.25999091 FALSE
## 376 yes 3 7 yes 0.21919441 FALSE
## 377 yes 2 0 yes 0.07793785 FALSE
## 378 yes 4 0 no 0.38076807 FALSE
## 379 no 1 0 yes 0.04093710 FALSE
## 380 no 1 0 yes 0.04093710 FALSE
## 381 yes 5 3 no 0.61468746 TRUE
## 382 yes 1 0 no 0.05783324 FALSE
# tabulate the target variable versus the predictions
table(high_use = dt_some_col$high_use, prediction = dt_some_col$prediction)
## prediction
## high_use FALSE TRUE
## FALSE 247 23
## TRUE 71 41
Above is the 2x2 cross-tabulation of predictions versus actual values.
It shows that 247 non-high-use cases were correctly predicted as FALSE, while 71 high-use cases were misclassified as FALSE.
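The training accuracy follows directly from this table; a one-line sketch (assuming dt_some_col still holds the prediction column):
# proportion of correct predictions: (247 + 41) / 382 = 0.754
mean(dt_some_col$high_use == dt_some_col$prediction)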
# EXTRA coding: Create a nice graph with confusion matrix
# inspired by https://stackoverflow.com/a/64539733
library('caret')
## Loading required package: lattice
cm <- confusionMatrix(factor(dt_some_col$prediction), factor(dt_some_col$high_use), dnn = c("Prediction", "Reference"))
ggplot(as.data.frame(cm$table), aes(Prediction,sort(Reference,decreasing = T), fill= Freq)) +
geom_tile() + geom_text(aes(label=Freq)) +
scale_fill_gradient(low="white", high="#009193") +
labs(x = "Prediction",y = "Reference") +
scale_x_discrete(labels=c("False","True")) +
scale_y_discrete(labels=c("True","False"))
Good plots!
As explained above, the graph shows that 247 cases were correctly predicted as FALSE, while 71 high-use cases were misclassified.
# initialize a plot of 'high_use' versus 'probability' in 'dt_some_col'
g <- ggplot(dt_some_col, aes(x = probability, y = high_use, col = prediction))
# define the geom as points and draw the plot
g + geom_point()
# tabulate the target variable versus the predictions
table(high_use = dt_some_col$high_use, prediction = dt_some_col$prediction) %>% prop.table %>% addmargins
## prediction
## high_use FALSE TRUE Sum
## FALSE 0.64659686 0.06020942 0.70680628
## TRUE 0.18586387 0.10732984 0.29319372
## Sum 0.83246073 0.16753927 1.00000000
Define a loss function (average prediction error)
loss_func <- function(class, prob) {
n_wrong <- abs(class - prob) > 0.5
mean(n_wrong)
}
# call loss_func to compute the average number of wrong predictions in the (training) data
loss_func(class = dt_some_col$high_use, prob = dt_some_col$probability)
## [1] 0.2460733
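A note on why this works: high_use is logical, and R coerces TRUE/FALSE to 1/0 in arithmetic, so a prediction counts as wrong whenever the probability lands on the wrong side of 0.5. A tiny sketch:
# TRUE coerces to 1: an actual high user predicted with probability 0.3
# gives |1 - 0.3| = 0.7 > 0.5, i.e. it is counted as a wrong prediction
abs(TRUE - 0.3) > 0.5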
Next, perform cross-validation and print out the mean prediction error for the test data. K-fold cross-validation divides the observations into K folds, holds one fold out for validation and uses the rest for training; for example, with K = 5, four folds are used for training and one for validation.
#load library
library(boot)
##
## Attaching package: 'boot'
## The following object is masked from 'package:lattice':
##
## melanoma
# cv.glm function from the 'boot' library computes the error and stores it in delta
cv <- cv.glm(data = dt_some_col, cost = loss_func, glmfit = regres_model, K = 2)
# average number of wrong predictions in the cross validation
cv$delta[1]
## [1] 0.2565445
k = c(2,3,4,5,6,7,8,9,10,11,12,13,14,15,20,25)
for (i in k){
print(i)
cv <- cv.glm(data = dt_some_col, cost = loss_func, glmfit = regres_model, K = i)
# average number of wrong predictions in the cross validation
print (cv$delta[1])}
## [1] 2
## [1] 0.2643979
## [1] 3
## [1] 0.2513089
## [1] 4
## [1] 0.2486911
## [1] 5
## [1] 0.2591623
## [1] 6
## [1] 0.2513089
## [1] 7
## [1] 0.2434555
## [1] 8
## [1] 0.2513089
## [1] 9
## [1] 0.2591623
## [1] 10
## [1] 0.2513089
## [1] 11
## [1] 0.2565445
## [1] 12
## [1] 0.2565445
## [1] 13
## [1] 0.2539267
## [1] 14
## [1] 0.2617801
## [1] 15
## [1] 0.2591623
## [1] 20
## [1] 0.2539267
## [1] 25
## [1] 0.2513089
So, varying the number of folds K in the loop above changes the cross-validation error estimate slightly; for several values of K the average number of wrong predictions (cv$delta[1] above) comes out a bit lower than the initial 2-fold run.
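For comparison, true leave-one-out cross-validation sets K equal to the number of observations, so each observation is its own validation fold; unlike the random K-fold splits above, it is deterministic. A sketch (slower, since the model is refitted 382 times):
# leave-one-out CV: K = n, each observation is held out once
cv_loo <- cv.glm(data = dt_some_col, cost = loss_func, glmfit = regres_model, K = nrow(dt_some_col))
cv_loo$delta[1]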
## Super-Bonus: Create logistic regression with other variables
I would like to add two new variables: romantic (whether or not the student is in a romantic relationship) and famrel (the quality of family relationships).
I keep the previously significant variables.
colnames(data_student_perf_alc)
## [1] "school" "sex" "age" "address" "famsize"
## [6] "Pstatus" "Medu" "Fedu" "Mjob" "Fjob"
## [11] "reason" "nursery" "internet" "guardian" "traveltime"
## [16] "studytime" "failures" "schoolsup" "famsup" "paid"
## [21] "activities" "higher" "romantic" "famrel" "freetime"
## [26] "goout" "Dalc" "Walc" "health" "absences"
## [31] "G1" "G2" "G3" "alc_use" "high_use"
variables <- c("romantic", "goout", "absences", "famrel",
"alc_use", "high_use")
dt_some_col <- select(data_student_perf_alc, all_of(variables))
dim(dt_some_col)
## [1] 382 6
# draw a bar plot of each variable
gather(dt_some_col) %>% ggplot(aes(value)) + facet_wrap("key", scales = "free") + geom_bar()
table(dt_some_col$absences)/nrow(dt_some_col)*100
##
## 0 1 2 3 4 5 6
## 30.3664921 0.7853403 17.5392670 1.5706806 13.3507853 1.0471204 7.8534031
## 7 8 9 10 11 12 13
## 2.0942408 5.4973822 0.7853403 4.4502618 0.5235602 2.6178010 0.7853403
## 14 15 16 17 18 19 20
## 2.8795812 0.7853403 1.5706806 0.2617801 1.0471204 0.2617801 0.5235602
## 21 22 23 24 25 26 28
## 0.2617801 0.7853403 0.2617801 0.2617801 0.2617801 0.2617801 0.2617801
## 30 54 56 75
## 0.2617801 0.2617801 0.2617801 0.2617801
barplot(sort(table(dt_some_col$romantic),decreasing=T))
barplot(sort(table(dt_some_col$goout),decreasing=T))
barplot(sort(table(dt_some_col$absences),decreasing=T))
barplot(sort(table(dt_some_col$famrel),decreasing=T))
barplot(sort(table(dt_some_col$alc_use),decreasing=T))
barplot(sort(table(dt_some_col$high_use),decreasing=T))
#set binary variables as factor
dt_some_col$romantic <- factor(dt_some_col$romantic)
# find the model with glm()
regres_model <- glm(high_use ~ romantic + goout + absences + famrel, data = dt_some_col, family = "binomial")
# print out a summary of the model
summary(regres_model)
##
## Call:
## glm(formula = high_use ~ romantic + goout + absences + famrel,
## family = "binomial", data = dt_some_col)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -1.9599 -0.7789 -0.5080 0.8454 2.4681
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.15491 0.63232 -3.408 0.000655 ***
## romanticyes -0.37335 0.27759 -1.345 0.178641
## goout 0.79978 0.12171 6.571 4.99e-11 ***
## absences 0.06001 0.01641 3.656 0.000256 ***
## famrel -0.41049 0.13511 -3.038 0.002380 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 462.21 on 381 degrees of freedom
## Residual deviance: 386.46 on 377 degrees of freedom
## AIC: 396.46
##
## Number of Fisher Scoring iterations: 4
Based on the model summary, the quality of family relationships (famrel) has a statistically significant relationship with high alcohol use (Pr(>|z|) = 0.002380), significant at the 99% level.
Students who go out more consumed more alcohol; this is again statistically significant (Pr(>|z|) = 4.99e-11), so the hypothesis is supported at the 99% level.
Thus, the fitted model is:
log-odds(high_use) = 0.79978 * goout + 0.06001 * absences - 0.37335 * romanticyes - 0.41049 * famrel - 2.15491, of the form y = a*x1 + b*x2 + c*x3 + d*x4 + intercept
# print out the coefficients of the model
coef(regres_model)
## (Intercept) romanticyes goout absences famrel
## -2.15490562 -0.37335072 0.79978011 0.06000845 -0.41048560
# compute odds ratios (OR)
OR <- coef(regres_model) %>% exp
# compute confidence intervals (CI)
CI <- confint(regres_model) %>% exp
## Waiting for profiling to be done...
# bind odds ratios (OR) and confidence intervals (CI) columns together
cbind(OR, CI)
## OR 2.5 % 97.5 %
## (Intercept) 0.1159141 0.03245399 0.3900847
## romanticyes 0.6884237 0.39504416 1.1763720
## goout 2.2250516 1.76429361 2.8462267
## absences 1.0618455 1.03000291 1.0997256
## famrel 0.6633281 0.50735265 0.8632692
# predict() the probability of high_use
probabilities <- predict(regres_model, type = "response")
# add the predicted probabilities to 'dt_some_col'
dt_some_col <- mutate(dt_some_col, probability = probabilities)
# use the probabilities to make a prediction of high_use
dt_some_col <- mutate(dt_some_col, prediction = probability > 0.5)
# see the last ten original classes, predicted probabilities, and class predictions
select(dt_some_col, romantic , goout , absences , famrel, probability, prediction) %>% tail(10)
## romantic goout absences famrel probability prediction
## 373 no 2 0 4 0.09999431 FALSE
## 374 no 3 14 5 0.27530426 FALSE
## 375 no 3 2 5 0.15604215 FALSE
## 376 yes 3 7 4 0.20573973 FALSE
## 377 no 2 0 5 0.06863981 FALSE
## 378 no 4 0 4 0.35486376 FALSE
## 379 no 1 0 1 0.14608898 FALSE
## 380 no 1 0 1 0.14608898 FALSE
## 381 no 5 3 2 0.76906675 TRUE
## 382 no 1 0 4 0.04755851 FALSE
# tabulate the target variable versus the predictions
table(high_use = dt_some_col$high_use, prediction = dt_some_col$prediction)
## prediction
## high_use FALSE TRUE
## FALSE 251 19
## TRUE 66 46
As shown above, 251 non-high-use cases were correctly predicted as FALSE and 66 high-use cases were misclassified, so the accuracy improved.
Moreover, the diagonal counts (251 and 46) increased compared to the previous model, which shows an increase in correctly predicted values and hence in model accuracy.
# call loss_func to compute the average number of wrong predictions in the (training) data
loss_func(class = dt_some_col$high_use, prob = dt_some_col$probability)
## [1] 0.2225131
K-fold cross-validation
As explained above, let’s perform K-fold cross-validation and print out the mean prediction error for the test data.
It divides the observations into K folds, holds one fold out for validation and uses the rest for training; for example, with K = 5, four folds are used for training and one for validation.
#load library
library(boot)
# cv.glm function from the 'boot' library computes the error and stores it in delta
cv <- cv.glm(data = dt_some_col, cost = loss_func, glmfit = regres_model, K = 2)
# average number of wrong predictions in the cross validation
cv$delta[1]
## [1] 0.2225131
k = c(2,3,4,5,6,7,8,9,10,11,12,13,14,15,20,25)
for (i in k){
print(i)
cv <- cv.glm(data = dt_some_col, cost = loss_func, glmfit = regres_model, K = i)
# average number of wrong predictions in the cross validation
print (cv$delta[1])}
## [1] 2
## [1] 0.2251309
## [1] 3
## [1] 0.2382199
## [1] 4
## [1] 0.2303665
## [1] 5
## [1] 0.2251309
## [1] 6
## [1] 0.2303665
## [1] 7
## [1] 0.2303665
## [1] 8
## [1] 0.2251309
## [1] 9
## [1] 0.2303665
## [1] 10
## [1] 0.2251309
## [1] 11
## [1] 0.2198953
## [1] 12
## [1] 0.2329843
## [1] 13
## [1] 0.2277487
## [1] 14
## [1] 0.2225131
## [1] 15
## [1] 0.2251309
## [1] 20
## [1] 0.2329843
## [1] 25
## [1] 0.2277487
So, the cross-validation error improved after adding the new variable (quality of family relationships): the average number of wrong predictions (cv$delta[1] above) decreased from roughly 0.25 to roughly 0.23.
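The two models can also be compared by AIC, which both summaries already report (403.79 for the internet/activities model versus 396.46 here; lower is better). A sketch, assuming the first fit had been kept in its own object (the name regres_model1 is hypothetical, since we overwrote regres_model above):
# hypothetical: AIC(regres_model1, regres_model) would compare both fits directly
AIC(regres_model) # 396.46, lower than the first model's 403.79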
#load the required library
library(MASS)
##
## Attaching package: 'MASS'
## The following object is masked from 'package:dplyr':
##
## select
#load the data
data("Boston")
Data description:
This dataset is about housing values in the suburbs of Boston. More info can be found at: https://stat.ethz.ch/R-manual/R-devel/library/MASS/html/Boston.html
#explore the dataset
str(Boston)
## 'data.frame': 506 obs. of 14 variables:
## $ crim : num 0.00632 0.02731 0.02729 0.03237 0.06905 ...
## $ zn : num 18 0 0 0 0 0 12.5 12.5 12.5 12.5 ...
## $ indus : num 2.31 7.07 7.07 2.18 2.18 2.18 7.87 7.87 7.87 7.87 ...
## $ chas : int 0 0 0 0 0 0 0 0 0 0 ...
## $ nox : num 0.538 0.469 0.469 0.458 0.458 0.458 0.524 0.524 0.524 0.524 ...
## $ rm : num 6.58 6.42 7.18 7 7.15 ...
## $ age : num 65.2 78.9 61.1 45.8 54.2 58.7 66.6 96.1 100 85.9 ...
## $ dis : num 4.09 4.97 4.97 6.06 6.06 ...
## $ rad : int 1 2 2 3 3 3 5 5 5 5 ...
## $ tax : num 296 242 242 222 222 222 311 311 311 311 ...
## $ ptratio: num 15.3 17.8 17.8 18.7 18.7 18.7 15.2 15.2 15.2 15.2 ...
## $ black : num 397 397 393 395 397 ...
## $ lstat : num 4.98 9.14 4.03 2.94 5.33 ...
## $ medv : num 24 21.6 34.7 33.4 36.2 28.7 22.9 27.1 16.5 18.9 ...
#summary of the data
summary(Boston)
## crim zn indus chas
## Min. : 0.00632 Min. : 0.00 Min. : 0.46 Min. :0.00000
## 1st Qu.: 0.08205 1st Qu.: 0.00 1st Qu.: 5.19 1st Qu.:0.00000
## Median : 0.25651 Median : 0.00 Median : 9.69 Median :0.00000
## Mean : 3.61352 Mean : 11.36 Mean :11.14 Mean :0.06917
## 3rd Qu.: 3.67708 3rd Qu.: 12.50 3rd Qu.:18.10 3rd Qu.:0.00000
## Max. :88.97620 Max. :100.00 Max. :27.74 Max. :1.00000
## nox rm age dis
## Min. :0.3850 Min. :3.561 Min. : 2.90 Min. : 1.130
## 1st Qu.:0.4490 1st Qu.:5.886 1st Qu.: 45.02 1st Qu.: 2.100
## Median :0.5380 Median :6.208 Median : 77.50 Median : 3.207
## Mean :0.5547 Mean :6.285 Mean : 68.57 Mean : 3.795
## 3rd Qu.:0.6240 3rd Qu.:6.623 3rd Qu.: 94.08 3rd Qu.: 5.188
## Max. :0.8710 Max. :8.780 Max. :100.00 Max. :12.127
## rad tax ptratio black
## Min. : 1.000 Min. :187.0 Min. :12.60 Min. : 0.32
## 1st Qu.: 4.000 1st Qu.:279.0 1st Qu.:17.40 1st Qu.:375.38
## Median : 5.000 Median :330.0 Median :19.05 Median :391.44
## Mean : 9.549 Mean :408.2 Mean :18.46 Mean :356.67
## 3rd Qu.:24.000 3rd Qu.:666.0 3rd Qu.:20.20 3rd Qu.:396.23
## Max. :24.000 Max. :711.0 Max. :22.00 Max. :396.90
## lstat medv
## Min. : 1.73 Min. : 5.00
## 1st Qu.: 6.95 1st Qu.:17.02
## Median :11.36 Median :21.20
## Mean :12.65 Mean :22.53
## 3rd Qu.:16.95 3rd Qu.:25.00
## Max. :37.97 Max. :50.00
The summary shows the minimum, mean, median, maximum, and first and third quartiles for each column of the dataset.
# check the dimensions of the data
dim(Boston)
## [1] 506 14
As it shows, this dataset has 506 rows (observations) and 14 columns (variables)
# see the head (first 6 rows) of the data
head(Boston)
## crim zn indus chas nox rm age dis rad tax ptratio black lstat
## 1 0.00632 18 2.31 0 0.538 6.575 65.2 4.0900 1 296 15.3 396.90 4.98
## 2 0.02731 0 7.07 0 0.469 6.421 78.9 4.9671 2 242 17.8 396.90 9.14
## 3 0.02729 0 7.07 0 0.469 7.185 61.1 4.9671 2 242 17.8 392.83 4.03
## 4 0.03237 0 2.18 0 0.458 6.998 45.8 6.0622 3 222 18.7 394.63 2.94
## 5 0.06905 0 2.18 0 0.458 7.147 54.2 6.0622 3 222 18.7 396.90 5.33
## 6 0.02985 0 2.18 0 0.458 6.430 58.7 6.0622 3 222 18.7 394.12 5.21
## medv
## 1 24.0
## 2 21.6
## 3 34.7
## 4 33.4
## 5 36.2
## 6 28.7
# see the tail (last 6 rows) of the data
tail(Boston)
## crim zn indus chas nox rm age dis rad tax ptratio black lstat
## 501 0.22438 0 9.69 0 0.585 6.027 79.7 2.4982 6 391 19.2 396.90 14.33
## 502 0.06263 0 11.93 0 0.573 6.593 69.1 2.4786 1 273 21.0 391.99 9.67
## 503 0.04527 0 11.93 0 0.573 6.120 76.7 2.2875 1 273 21.0 396.90 9.08
## 504 0.06076 0 11.93 0 0.573 6.976 91.0 2.1675 1 273 21.0 396.90 5.64
## 505 0.10959 0 11.93 0 0.573 6.794 89.3 2.3889 1 273 21.0 393.45 6.48
## 506 0.04741 0 11.93 0 0.573 6.030 80.8 2.5050 1 273 21.0 396.90 7.88
## medv
## 501 16.8
## 502 22.4
## 503 20.6
## 504 23.9
## 505 22.0
## 506 11.9
#see column names
colnames(Boston)
## [1] "crim" "zn" "indus" "chas" "nox" "rm" "age"
## [8] "dis" "rad" "tax" "ptratio" "black" "lstat" "medv"
# plot matrix of the variables
pairs(Boston)
You can see pairwise plots of each variable (column) against the others and how they are distributed.
library(ggplot2)
library(GGally)
p <- ggpairs(Boston, mapping = aes(), lower = list(combo = wrap("facethist", bins = 20)))
p
Some more plots that show the correlation of each column (variable) with the others, along with the distributions.
Let’s also draw boxplots to visually see the distribution, minimum, maximum, quartiles and median of each variable
boxplot(Boston,
main = "Boxplot of all columns of Boston data",
xlab = "variable name",
ylab = "",
col = "orange",
border = "brown",
horizontal = F,
notch = F)
As the graph shows, the column “tax” has the highest variation, followed by “black”. The values in the other columns vary much less!
Let’s look at the structure of the data in more detail using glimpse
# read libraries
library(tidyr); library(dplyr); library(ggplot2)
# glimpse at the alc data
glimpse(Boston)
## Rows: 506
## Columns: 14
## $ crim <dbl> 0.00632, 0.02731, 0.02729, 0.03237, 0.06905, 0.02985, 0.088...
## $ zn <dbl> 18.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.5, 12.5, 12.5, 12.5, 12.5...
## $ indus <dbl> 2.31, 7.07, 7.07, 2.18, 2.18, 2.18, 7.87, 7.87, 7.87, 7.87,...
## $ chas <int> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...
## $ nox <dbl> 0.538, 0.469, 0.469, 0.458, 0.458, 0.458, 0.524, 0.524, 0.5...
## $ rm <dbl> 6.575, 6.421, 7.185, 6.998, 7.147, 6.430, 6.012, 6.172, 5.6...
## $ age <dbl> 65.2, 78.9, 61.1, 45.8, 54.2, 58.7, 66.6, 96.1, 100.0, 85.9...
## $ dis <dbl> 4.0900, 4.9671, 4.9671, 6.0622, 6.0622, 6.0622, 5.5605, 5.9...
## $ rad <int> 1, 2, 2, 3, 3, 3, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4,...
## $ tax <dbl> 296, 242, 242, 222, 222, 222, 311, 311, 311, 311, 311, 311,...
## $ ptratio <dbl> 15.3, 17.8, 17.8, 18.7, 18.7, 18.7, 15.2, 15.2, 15.2, 15.2,...
## $ black <dbl> 396.90, 396.90, 392.83, 394.63, 396.90, 394.12, 395.60, 396...
## $ lstat <dbl> 4.98, 9.14, 4.03, 2.94, 5.33, 5.21, 12.43, 19.15, 29.93, 17...
## $ medv <dbl> 24.0, 21.6, 34.7, 33.4, 36.2, 28.7, 22.9, 27.1, 16.5, 18.9,...
# use gather() to gather columns into key-value pairs and then glimpse() at the resulting data
gather(Boston) %>% glimpse
## Rows: 7,084
## Columns: 2
## $ key <chr> "crim", "crim", "crim", "crim", "crim", "crim", "crim", "crim...
## $ value <dbl> 0.00632, 0.02731, 0.02729, 0.03237, 0.06905, 0.02985, 0.08829...
# draw a bar plot of each variable
gather(Boston) %>% ggplot(aes(value)) + facet_wrap("key", scales = "free") + geom_bar()
The graph shows barplot of each column.
Let’s investigate possible correlations within the data: calculate the correlation matrix and round it.
cor_matrix<-cor(Boston) %>% round(digits = 2)
# print the correlation matrix
cor_matrix
## crim zn indus chas nox rm age dis rad tax ptratio
## crim 1.00 -0.20 0.41 -0.06 0.42 -0.22 0.35 -0.38 0.63 0.58 0.29
## zn -0.20 1.00 -0.53 -0.04 -0.52 0.31 -0.57 0.66 -0.31 -0.31 -0.39
## indus 0.41 -0.53 1.00 0.06 0.76 -0.39 0.64 -0.71 0.60 0.72 0.38
## chas -0.06 -0.04 0.06 1.00 0.09 0.09 0.09 -0.10 -0.01 -0.04 -0.12
## nox 0.42 -0.52 0.76 0.09 1.00 -0.30 0.73 -0.77 0.61 0.67 0.19
## rm -0.22 0.31 -0.39 0.09 -0.30 1.00 -0.24 0.21 -0.21 -0.29 -0.36
## age 0.35 -0.57 0.64 0.09 0.73 -0.24 1.00 -0.75 0.46 0.51 0.26
## dis -0.38 0.66 -0.71 -0.10 -0.77 0.21 -0.75 1.00 -0.49 -0.53 -0.23
## rad 0.63 -0.31 0.60 -0.01 0.61 -0.21 0.46 -0.49 1.00 0.91 0.46
## tax 0.58 -0.31 0.72 -0.04 0.67 -0.29 0.51 -0.53 0.91 1.00 0.46
## ptratio 0.29 -0.39 0.38 -0.12 0.19 -0.36 0.26 -0.23 0.46 0.46 1.00
## black -0.39 0.18 -0.36 0.05 -0.38 0.13 -0.27 0.29 -0.44 -0.44 -0.18
## lstat 0.46 -0.41 0.60 -0.05 0.59 -0.61 0.60 -0.50 0.49 0.54 0.37
## medv -0.39 0.36 -0.48 0.18 -0.43 0.70 -0.38 0.25 -0.38 -0.47 -0.51
## black lstat medv
## crim -0.39 0.46 -0.39
## zn 0.18 -0.41 0.36
## indus -0.36 0.60 -0.48
## chas 0.05 -0.05 0.18
## nox -0.38 0.59 -0.43
## rm 0.13 -0.61 0.70
## age -0.27 0.60 -0.38
## dis 0.29 -0.50 0.25
## rad -0.44 0.49 -0.38
## tax -0.44 0.54 -0.47
## ptratio -0.18 0.37 -0.51
## black 1.00 -0.37 0.33
## lstat -0.37 1.00 -0.74
## medv 0.33 -0.74 1.00
Inspired by http://www.htmlwidgets.org/showcase_datatables.html
library('DT')
datatable(cor_matrix, options = list(pageLength = 10))
Or alternatively, set the options to limit the page length to the number of rows of the input data frame.
datatable(cor_matrix, options = list(nrow(cor_matrix)))
visualize the correlation matrix
library("corrplot")
## corrplot 0.84 loaded
corrplot(cor_matrix, method="circle", type="upper",
cl.pos="b", tl.pos="d", tl.cex = 0.7)
The graph shows the correlation between variables: the larger the circle (and the darker the colour), the stronger the correlation, and the colour shows whether the correlation is positive or negative. For example, the correlation between nox and dis is strong and negative, i.e. as one variable increases the other decreases. Another example is rad and tax, which have a strong positive correlation.
On the other hand, chas correlates only weakly with the other variables.
Task 4: Let’s standardize the variables using the scale() function
boston_scaled <- scale(Boston)
With center = TRUE (the default), scaling first centres the data by subtracting the column means (omitting NAs) from the corresponding columns, and then divides the centred columns by their standard deviations.
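To see exactly what scale() does, here is a small sketch reproducing it manually with base R’s sweep():
# centre by subtracting column means, then divide by column standard deviations
centered <- sweep(as.matrix(Boston), 2, colMeans(Boston), "-")
manual_scaled <- sweep(centered, 2, apply(Boston, 2, sd), "/")
all.equal(manual_scaled, scale(Boston), check.attributes = FALSE) # TRUE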
# summaries of the scaled variables
summary(boston_scaled)
## crim zn indus chas
## Min. :-0.419367 Min. :-0.48724 Min. :-1.5563 Min. :-0.2723
## 1st Qu.:-0.410563 1st Qu.:-0.48724 1st Qu.:-0.8668 1st Qu.:-0.2723
## Median :-0.390280 Median :-0.48724 Median :-0.2109 Median :-0.2723
## Mean : 0.000000 Mean : 0.00000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.007389 3rd Qu.: 0.04872 3rd Qu.: 1.0150 3rd Qu.:-0.2723
## Max. : 9.924110 Max. : 3.80047 Max. : 2.4202 Max. : 3.6648
## nox rm age dis
## Min. :-1.4644 Min. :-3.8764 Min. :-2.3331 Min. :-1.2658
## 1st Qu.:-0.9121 1st Qu.:-0.5681 1st Qu.:-0.8366 1st Qu.:-0.8049
## Median :-0.1441 Median :-0.1084 Median : 0.3171 Median :-0.2790
## Mean : 0.0000 Mean : 0.0000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.5981 3rd Qu.: 0.4823 3rd Qu.: 0.9059 3rd Qu.: 0.6617
## Max. : 2.7296 Max. : 3.5515 Max. : 1.1164 Max. : 3.9566
## rad tax ptratio black
## Min. :-0.9819 Min. :-1.3127 Min. :-2.7047 Min. :-3.9033
## 1st Qu.:-0.6373 1st Qu.:-0.7668 1st Qu.:-0.4876 1st Qu.: 0.2049
## Median :-0.5225 Median :-0.4642 Median : 0.2746 Median : 0.3808
## Mean : 0.0000 Mean : 0.0000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 1.6596 3rd Qu.: 1.5294 3rd Qu.: 0.8058 3rd Qu.: 0.4332
## Max. : 1.6596 Max. : 1.7964 Max. : 1.6372 Max. : 0.4406
## lstat medv
## Min. :-1.5296 Min. :-1.9063
## 1st Qu.:-0.7986 1st Qu.:-0.5989
## Median :-0.1811 Median :-0.1449
## Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.6024 3rd Qu.: 0.2683
## Max. : 3.5453 Max. : 2.9865
So, the variables were standardized as explained above: the mean of each column is now 0 and the other values are expressed in standard deviations from that mean.
# class of the boston_scaled object
class(boston_scaled)
## [1] "matrix" "array"
As you can see, the data class is “matrix” “array”, so we will convert the object to a data frame.
# change the object to data frame
boston_scaled <- as.data.frame(boston_scaled)
summary(boston_scaled)
## crim zn indus chas
## Min. :-0.419367 Min. :-0.48724 Min. :-1.5563 Min. :-0.2723
## 1st Qu.:-0.410563 1st Qu.:-0.48724 1st Qu.:-0.8668 1st Qu.:-0.2723
## Median :-0.390280 Median :-0.48724 Median :-0.2109 Median :-0.2723
## Mean : 0.000000 Mean : 0.00000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.007389 3rd Qu.: 0.04872 3rd Qu.: 1.0150 3rd Qu.:-0.2723
## Max. : 9.924110 Max. : 3.80047 Max. : 2.4202 Max. : 3.6648
## nox rm age dis
## Min. :-1.4644 Min. :-3.8764 Min. :-2.3331 Min. :-1.2658
## 1st Qu.:-0.9121 1st Qu.:-0.5681 1st Qu.:-0.8366 1st Qu.:-0.8049
## Median :-0.1441 Median :-0.1084 Median : 0.3171 Median :-0.2790
## Mean : 0.0000 Mean : 0.0000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.5981 3rd Qu.: 0.4823 3rd Qu.: 0.9059 3rd Qu.: 0.6617
## Max. : 2.7296 Max. : 3.5515 Max. : 1.1164 Max. : 3.9566
## rad tax ptratio black
## Min. :-0.9819 Min. :-1.3127 Min. :-2.7047 Min. :-3.9033
## 1st Qu.:-0.6373 1st Qu.:-0.7668 1st Qu.:-0.4876 1st Qu.: 0.2049
## Median :-0.5225 Median :-0.4642 Median : 0.2746 Median : 0.3808
## Mean : 0.0000 Mean : 0.0000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 1.6596 3rd Qu.: 1.5294 3rd Qu.: 0.8058 3rd Qu.: 0.4332
## Max. : 1.6596 Max. : 1.7964 Max. : 1.6372 Max. : 0.4406
## lstat medv
## Min. :-1.5296 Min. :-1.9063
## 1st Qu.:-0.7986 1st Qu.:-0.5989
## Median :-0.1811 Median :-0.1449
## Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.6024 3rd Qu.: 0.2683
## Max. : 3.5453 Max. : 2.9865
str(boston_scaled)
## 'data.frame': 506 obs. of 14 variables:
## $ crim : num -0.419 -0.417 -0.417 -0.416 -0.412 ...
## $ zn : num 0.285 -0.487 -0.487 -0.487 -0.487 ...
## $ indus : num -1.287 -0.593 -0.593 -1.306 -1.306 ...
## $ chas : num -0.272 -0.272 -0.272 -0.272 -0.272 ...
## $ nox : num -0.144 -0.74 -0.74 -0.834 -0.834 ...
## $ rm : num 0.413 0.194 1.281 1.015 1.227 ...
## $ age : num -0.12 0.367 -0.266 -0.809 -0.511 ...
## $ dis : num 0.14 0.557 0.557 1.077 1.077 ...
## $ rad : num -0.982 -0.867 -0.867 -0.752 -0.752 ...
## $ tax : num -0.666 -0.986 -0.986 -1.105 -1.105 ...
## $ ptratio: num -1.458 -0.303 -0.303 0.113 0.113 ...
## $ black : num 0.441 0.441 0.396 0.416 0.441 ...
## $ lstat : num -1.074 -0.492 -1.208 -1.36 -1.025 ...
## $ medv : num 0.16 -0.101 1.323 1.182 1.486 ...
head(boston_scaled)
## crim zn indus chas nox rm age
## 1 -0.4193669 0.2845483 -1.2866362 -0.2723291 -0.1440749 0.4132629 -0.1198948
## 2 -0.4169267 -0.4872402 -0.5927944 -0.2723291 -0.7395304 0.1940824 0.3668034
## 3 -0.4169290 -0.4872402 -0.5927944 -0.2723291 -0.7395304 1.2814456 -0.2655490
## 4 -0.4163384 -0.4872402 -1.3055857 -0.2723291 -0.8344581 1.0152978 -0.8090878
## 5 -0.4120741 -0.4872402 -1.3055857 -0.2723291 -0.8344581 1.2273620 -0.5106743
## 6 -0.4166314 -0.4872402 -1.3055857 -0.2723291 -0.8344581 0.2068916 -0.3508100
## dis rad tax ptratio black lstat medv
## 1 0.140075 -0.9818712 -0.6659492 -1.4575580 0.4406159 -1.0744990 0.1595278
## 2 0.556609 -0.8670245 -0.9863534 -0.3027945 0.4406159 -0.4919525 -0.1014239
## 3 0.556609 -0.8670245 -0.9863534 -0.3027945 0.3960351 -1.2075324 1.3229375
## 4 1.076671 -0.7521778 -1.1050216 0.1129203 0.4157514 -1.3601708 1.1815886
## 5 1.076671 -0.7521778 -1.1050216 0.1129203 0.4406159 -1.0254866 1.4860323
## 6 1.076671 -0.7521778 -1.1050216 0.1129203 0.4101651 -1.0422909 0.6705582
boxplot(boston_scaled,
main = "Boxplot of all columns of scaled Boston data",
xlab = "variable name",
ylab = "",
col = "orange",
border = "brown",
horizontal = F,
notch = F)
So, now the distribution of data is shown better. Yaaay.
# summary of the scaled crime rate
summary(boston_scaled$crim)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.419367 -0.410563 -0.390280 0.000000 0.007389 9.924110
As instructed, we shall now use the quantile vector of the column crim to create a categorical crime-rate variable in the dataset.
So, let’s start.
# create a quantile vector of crim and print it
bins <- quantile(boston_scaled$crim)
bins
## 0% 25% 50% 75% 100%
## -0.419366929 -0.410563278 -0.390280295 0.007389247 9.924109610
You can see the quantiles of the column crim; for example, the minimum is -0.419366929 and the first quartile is -0.410563278.
Now we can categorise the continuous (float) values based on the quantiles of the data (as instructed).
# create a categorical variable 'crime'
crime <- cut(boston_scaled$crim, breaks = bins, include.lowest = TRUE, labels = c("low", "med_low", "med_high", "high"))
cut() divides the range of x into intervals and codes the values in x according to the interval into which they fall.
Now we have labelled the crime rate as “low”, “med_low”, “med_high” or “high” based on the quantile values.
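A tiny sketch of cut() on a toy vector (hypothetical values, just to show the mechanics):
# 1, 5 and 9 fall into the intervals (0,3], (3,6] and (6,10] respectively,
# so the result is the factor: low mid high
cut(c(1, 5, 9), breaks = c(0, 3, 6, 10), labels = c("low", "mid", "high"))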
# look at the table of the new factor crime
table(crime)
## crime
## low med_low med_high high
## 127 126 126 127
Now you can see the number of observations in each category: 127 low, 126 med_low, 126 med_high and 127 high cases/observations.
#we can even plot them
plot(crime)
# remove original crim from the dataset
boston_scaled <- dplyr::select(boston_scaled, -crim)
# add the new categorical value to scaled data
boston_scaled <- data.frame(boston_scaled, crime)
# number of rows in the Boston dataset
n <- nrow(boston_scaled)
# choose randomly 80% of the rows as train dataset
ind <- sample(n, size = n * 0.8)
# create train set
train <- boston_scaled[ind,]
# create test set by selecting those observations (rows) that were not in the training data (using -ind)
test <- boston_scaled[-ind,]
# save the correct classes from test data
correct_classes <- test$crime
# remove the crime variable from test data
test <- dplyr::select(test, -crime)
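One caveat: sample() draws randomly, so the split (and everything downstream) changes between runs. A sketch of how to make it reproducible (the seed value is arbitrary); the seed must be set before the sample() call above:
# fixing the random seed makes the 80/20 split reproducible across runs
set.seed(2020)
ind <- sample(n, size = n * 0.8)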
Task 5: Fit a linear discriminant analysis model on the train set.
lda.fit <- lda(crime ~ ., data = train)
This uses all variables (columns) of the train dataset to predict crime (the categorised column).
# print the lda.fit object
lda.fit
## Call:
## lda(crime ~ ., data = train)
##
## Prior probabilities of groups:
## low med_low med_high high
## 0.2450495 0.2549505 0.2574257 0.2425743
##
## Group means:
## zn indus chas nox rm age
## low 0.98963896 -0.9127414 -0.11325431 -0.9015002 0.50356024 -0.8810358
## med_low -0.08989818 -0.3251951 0.03346513 -0.5960999 -0.09319307 -0.3718148
## med_high -0.36190702 0.1647356 0.10623826 0.3764508 0.07077965 0.4227901
## high -0.48724019 1.0149946 -0.07145661 1.0491257 -0.44385321 0.8188825
## dis rad tax ptratio black lstat
## low 0.8855482 -0.6883741 -0.7602844 -0.468427240 0.3723910 -0.77033968
## med_low 0.3883149 -0.5481298 -0.4636371 0.005740361 0.3206525 -0.18334485
## med_high -0.3924916 -0.4584353 -0.3521630 -0.347208479 0.1039890 0.01823342
## high -0.8741459 1.6596029 1.5294129 0.805778429 -0.7696060 0.90484147
## medv
## low 0.612349288
## med_low 0.003189004
## med_high 0.169459845
## high -0.705540430
##
## Coefficients of linear discriminants:
## LD1 LD2 LD3
## zn 1.389875e-01 0.56141459 -1.04583171
## indus -6.870227e-05 -0.27159833 0.02783293
## chas 5.258962e-03 -0.00999781 0.18838098
## nox 3.126096e-01 -0.91442760 -1.09383503
## rm 3.857288e-02 -0.03238383 -0.07400068
## age 1.710850e-01 -0.36456093 -0.09763043
## dis -1.730141e-01 -0.21606556 0.25045301
## rad 4.036258e+00 0.78208025 -0.33981136
## tax 3.171024e-02 0.26569450 0.81865224
## ptratio 1.760478e-01 0.01220869 -0.15579676
## black -1.419523e-01 0.03883550 0.13551036
## lstat 1.662366e-01 -0.10896029 0.20998148
## medv 2.430750e-02 -0.28786852 -0.31689502
##
## Proportion of trace:
## LD1 LD2 LD3
## 0.9629 0.0278 0.0093
# the function for lda biplot arrows
lda.arrows <- function(x, myscale = 1, arrow_heads = 0.1, color = "orange", tex = 0.75, choices = c(1,2)){
heads <- coef(x)
arrows(x0 = 0, y0 = 0,
x1 = myscale * heads[,choices[1]],
y1 = myscale * heads[,choices[2]], col=color, length = arrow_heads)
text(myscale * heads[,choices], labels = row.names(heads),
cex = tex, col=color, pos=3)
}
# target classes as numeric
classes <- as.numeric(train$crime)
# plot the lda results
plot(lda.fit, dimen = 2, col = classes, pch = classes)
lda.arrows(lda.fit, myscale = 1)
Predict:
# predict classes with test data
lda.pred <- predict(lda.fit, newdata = test)
# cross tabulate the results
table(correct = correct_classes, predicted = lda.pred$class)
## predicted
## correct low med_low med_high high
## low 15 12 1 0
## med_low 4 15 4 0
## med_high 0 8 11 3
## high 0 0 1 28
Inspired by https://stackoverflow.com/a/64539733
library('caret')
cm <- confusionMatrix(factor(lda.pred$class), factor(correct_classes), dnn = c("Prediction", "Reference"))
ggplot(as.data.frame(cm$table), aes(Prediction,sort(Reference,decreasing = T), fill= Freq)) +
geom_tile() + geom_text(aes(label=Freq)) +
scale_fill_gradient(low="white", high="#009193") +
labs(x = "Reference",y = "Prediction") +
scale_x_discrete(labels=c('low','med_low','med_high','high')) +
scale_y_discrete(labels=c('high', 'med_high', 'med_low', 'low'))
Overall accuracy is the proportion of correctly classified labels out of all cases.
Code inspired by https://stackoverflow.com/a/24349171
all_accuracies= cm$overall
all_accuracies
## Accuracy Kappa AccuracyLower AccuracyUpper AccuracyNull
## 6.764706e-01 5.681293e-01 5.766288e-01 7.657576e-01 2.843137e-01
## AccuracyPValue McnemarPValue
## 2.617624e-16 NaN
all_accuracies['Accuracy']*100
## Accuracy
## 67.64706
all_accuracies['Kappa']*100
## Kappa
## 56.81293
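As a sanity check, the same overall accuracy can be computed straight from the cross-tabulation (a sketch, assuming lda.pred and correct_classes are still in the workspace):
# correct predictions sit on the diagonal of the confusion table
tab <- table(correct = correct_classes, predicted = lda.pred$class)
sum(diag(tab)) / sum(tab) # 69/102 = 0.676, matching cm$overall['Accuracy']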
The confusion matrix (graph) shows, for example, that 28 cases of class high were predicted correctly as high, while one was misclassified as med_high.
As the confusion matrix shows, most of the high cases were classified as high.
Task 7: Reload the dataset and calculate the distances between the observations.
# load MASS and Boston
library(MASS)
data('Boston')
# euclidean distance matrix
dist_eu <- dist(Boston)
# look at the summary of the distances
summary(dist_eu)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1.119 85.624 170.539 226.315 371.950 626.047
# manhattan distance matrix
dist_man <- dist(Boston, method = 'manhattan')
# look at the summary of the distances
summary(dist_man)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 2.016 149.145 279.505 342.899 509.707 1198.265
As you can see, the mean Manhattan distance between observations is 342.899 (with 2.02 and 1198.26 as min and max, respectively).
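To make the two metrics concrete, a small sketch compares them for the first two observations of Boston:
# Euclidean: square root of the summed squared differences between the two rows
dist(Boston[1:2, ])
# Manhattan: sum of the absolute differences between the same two rows
dist(Boston[1:2, ], method = "manhattan")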
Let’s cluster the dataset into 3 classes, so that homogeneity within each cluster is greater than between clusters. Clusters are identified by assessing the relative distances between points; in this example, the relative homogeneity of each cluster and the degree of separation between the clusters makes the task very simple (source: Multivariate Analysis for the Behavioral Sciences).
km <-kmeans(Boston, centers = 3)
# plot the Boston dataset with clusters
pairs(Boston, col = km$cluster)
These pair plots show how well the variables separate the observations when the dataset is clustered into 3 clusters.
For example, the column pairs tax and rad, tax and black, or tax and lstat are visually separable into clusters. Interestingly, these columns also show high correlations with each other.
As another example, the correlation between tax and rad is 0.91 (see below), so I plot the correlation matrix again below. FYI: we can plot them in 3D to see this even better visually. More info: check the bonus exercises.
cor(Boston$tax, Boston$rad)
## [1] 0.9102282
# visualize the correlation matrix
corrplot(cor_matrix, method="circle", type="upper",
cl.pos="b", tl.pos="d", tl.cex = 0.7)
# Investigate the optimal number of clusters
set.seed(1)
# determine the number of clusters
k_max <- 10
# calculate the total within sum of squares
twcss <- sapply(1:k_max, function(k){kmeans(Boston, k)$tot.withinss})
# visualize the results
qplot(x = 1:k_max, y = twcss, geom = 'line', xlab= "number of clusters", ylab="Within groups sum of squares")
As the figure shows, the total within-cluster sum of squares declines sharply as the number of clusters K increases, but after 5 it levels off, so around 4 or 5 clusters is enough.
# k-means clustering
km <-kmeans(Boston, centers = 2) #number of clusters
km
## K-means clustering with 2 clusters of sizes 369, 137
##
## Cluster means:
## crim zn indus chas nox rm age dis
## 1 0.3887744 15.58266 8.420894 0.07317073 0.5118474 6.388005 60.63225 4.441272
## 2 12.2991617 0.00000 18.451825 0.05839416 0.6701022 6.006212 89.96788 2.054470
## rad tax ptratio black lstat medv
## 1 4.455285 311.9268 17.80921 381.0426 10.41745 24.85718
## 2 23.270073 667.6423 20.19635 291.0391 18.67453 16.27226
##
## Clustering vector:
## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
## 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360
## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2
## 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380
## 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400
## 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420
## 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440
## 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460
## 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480
## 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500
## 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1
## 501 502 503 504 505 506
## 1 1 1 1 1 1
##
## Within cluster sum of squares by cluster:
## [1] 2868770 2896224
## (between_SS / total_SS = 70.3 %)
##
## Available components:
##
## [1] "cluster" "centers" "totss" "withinss" "tot.withinss"
## [6] "betweenss" "size" "iter" "ifault"
Here I analyse different numbers of clusters by changing the centers argument and report the results below.
Looking at the kmeans output, the reported percentage (between_SS / total_SS) is calculated by dividing the between-cluster sum of squares by the total sum of squares. So let’s print that inside the loop.
centers_list = c(2, 3,4,5,6,7,8,9,10,15,20)
for (i in centers_list){
print('.........')
print(i)
km <-kmeans(Boston, centers = i)
#print(km)
print(km$betweenss/km$totss*100)
}
## [1] "........."
## [1] 2
## [1] 70.28516
## [1] "........."
## [1] 3
## [1] 84.18386
## [1] "........."
## [1] 4
## [1] 90.64774
## [1] "........."
## [1] 5
## [1] 79.60841
## [1] "........."
## [1] 6
## [1] 93.57224
## [1] "........."
## [1] 7
## [1] 94.17725
## [1] "........."
## [1] 8
## [1] 82.01612
## [1] "........."
## [1] 9
## [1] 95.79905
## [1] "........."
## [1] 10
## [1] 82.72624
## [1] "........."
## [1] 15
## [1] 97.35675
## [1] "........."
## [1] 20
## [1] 97.86162
The numbers above are the number of clusters and the corresponding between_SS / total_SS percentage.
Interpretation: although this ratio improves as the number of clusters increases, 4 clusters seems fine, because the improvement is dramatic up to that point and then levels off.
Notably, I think this depends on the application and aim of each project. For example, in my personal experience of forest mapping using aerial imagery, increasing the number of clusters often led to horrible maps, so somewhere between 3 and 5 is good.
Thus, as explained above, we can use 4 clusters to keep this exercise manageable. I run the clustering again to save it in an R object.
km <-kmeans(Boston, centers = 4)
# plot the Boston dataset with clusters
pairs(Boston, col = km$cluster)
As the figure shows, and as explained above, some column pairs such as tax and black or tax and dis separate the clusters easily.
Inspired by https://www.r-bloggers.com/2020/05/practical-guide-to-k-means-clustering/ This can be considered an alternative approach.
wssplot <- function(Boston, nc=15, seed=1234){
wss <- (nrow(Boston)-1)*sum(apply(Boston,2,var))
for (i in 2:nc){
set.seed(seed)
wss[i] <- sum(kmeans(Boston, centers=i)$withinss)}
plot(1:nc, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")}
# plotting values for each cluster starting from 1 to 9
wssplot(Boston, nc = 9)
Interpretation: as the plot shows, the total within-cluster sum of squares declines as the number of clusters increases, but levels off after 5. Thus, selecting 4 or 5 clusters sounds optimal.
library("factoextra")
## Welcome! Want to learn more? See two factoextra-related books at https://goo.gl/ve3WBa
my_data <- scale(Boston)
clus_plot = fviz_nbclust(my_data, kmeans, method = "wss") #method = "wss" is total within sum of square
clus_plot
The fviz_nbclust function determines and visualizes the optimal number of clusters; here it uses the total within-cluster sum of squares method.
model_predictors <- dplyr::select(train, -crime)
# check the dimensions
dim(model_predictors)
## [1] 404 13
dim(lda.fit$scaling)
## [1] 13 3
# matrix multiplication
library("plotly")
##
## Attaching package: 'plotly'
## The following object is masked from 'package:MASS':
##
## select
## The following object is masked from 'package:ggplot2':
##
## last_plot
## The following object is masked from 'package:stats':
##
## filter
## The following object is masked from 'package:graphics':
##
## layout
library("colorspace")
#red <- hex(HLS(0, 0.5, 1))
matrix_product <- as.matrix(model_predictors) %*% lda.fit$scaling
matrix_product <- as.data.frame(matrix_product)
plot_ly(x = matrix_product$LD1, y = matrix_product$LD2,
z = matrix_product$LD3, type= 'scatter3d', mode='markers',
color = train$crime, colors = c("blue", "yellow", "black", "red"))
## Warning: `arrange_()` is deprecated as of dplyr 0.7.0.
## Please use `arrange()` instead.
## See vignette('programming') for more help
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_warnings()` to see where this warning was generated.
Feel free to zoom in and out on the 3D figure :) By zooming in, you can see and distinguish the four classes in the 3D figure above.
require(plot3D)
## Loading required package: plot3D
scatter3D(x = matrix_product$LD1, y = matrix_product$LD2,
z = matrix_product$LD3, pch = 18, cex = 2,
theta = 15, phi = 20, # changing theta and phi changes the viewing angle of the 3D plot
ticktype = "detailed",
xlab = "LD1", ylab = "LD2", zlab = "LD3", clab = "",
colkey = list(length = 0.8, width = 0.4),
main = "3D scatter of clusters", col=c("blue", "yellow", "black", "red"))
Technical note: changing theta and phi changes the viewing angle of the 3D plot.
From this angle it is easy to discriminate two clusters (one on the right and one on the left); if we could rotate the plot, we could inspect the other sides and say more about the four possible clusters visually. I made that possible in the extra code below.
Inspired by http://www.sthda.com/english/wiki/impressive-package-for-3d-and-4d-graph-r-software-and-data-visualization
# Make the rgl version
library("plot3Drgl")
## Loading required package: rgl
plotrgl()
Feel free to zoom in and out on the 3D figure :)
Technical note: run your 3D plot first, then run this code to display it in an interactive, zoomable window.
We can see that it is quite easy to distinguish 3-4 clusters visually.
FYI: you can even plot a histogram of all columns in 3D and zoom into it: hist3D(z = as.matrix(test))
# plot the lda results
plot(lda.fit, dimen = 3, col = classes, pch = classes)
So, we can see that two clusters are easy to distinguish visually, and even four is possible.
inspired from https://www.r-bloggers.com/2020/05/practical-guide-to-k-means-clustering/
# Related to bonus 2: colouring
library(factoextra)
fviz_cluster(km, data = Boston, main = "Partitioning Clustering Plot")
Technical note: fviz_cluster provides ggplot2-based elegant visualization of partitioning methods including kmeans.
Since the graph is 2D, we cannot rotate it or zoom to see other views of the graph. But generally, it looks good!
options(knitr.duplicate.label = "allow")
# to solve the following error (shown below), I added this chunk, which fixed it:
# error: Error in parse_block(g[-1], g[1], params.src, markdown_mode) : Duplicate chunk label 'setup', which has been used for the chunk: knitr::opts_chunk$set(echo = TRUE) Calls: <Anonymous> ... process_file -> split_file -> lapply -> FUN -> parse_block
# inspired by: https://bookdown.org/yihui/rmarkdown-cookbook/duplicate-label.html
Mohammad Imangholiloo
Note: this data wrangling was done to prepare for next week.
#Read the “Human development” file into R
hd <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv", stringsAsFactors = F)
#Read the “Gender inequality” file into R
gii <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv", stringsAsFactors = F, na.strings = "..")
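The na.strings = ".." argument turns the ".." placeholders used in the source file into proper NA values; a quick check (output not shown here):
# count missing values created from the ".." placeholders
sum(is.na(gii))      # total NAs in the data
colSums(is.na(gii))  # NAs per column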
For data description and more information, please visit:
http://hdr.undp.org/en/content/human-development-index-hdi and http://hdr.undp.org/sites/default/files/hdr2015_technical_notes.pdf
# structure of the data
str(hd)
## 'data.frame': 195 obs. of 8 variables:
## $ HDI.Rank : int 1 2 3 4 5 6 6 8 9 9 ...
## $ Country : chr "Norway" "Australia" "Switzerland" "Denmark" ...
## $ Human.Development.Index..HDI. : num 0.944 0.935 0.93 0.923 0.922 0.916 0.916 0.915 0.913 0.913 ...
## $ Life.Expectancy.at.Birth : num 81.6 82.4 83 80.2 81.6 80.9 80.9 79.1 82 81.8 ...
## $ Expected.Years.of.Education : num 17.5 20.2 15.8 18.7 17.9 16.5 18.6 16.5 15.9 19.2 ...
## $ Mean.Years.of.Education : num 12.6 13 12.8 12.7 11.9 13.1 12.2 12.9 13 12.5 ...
## $ Gross.National.Income..GNI..per.Capita: chr "64,992" "42,261" "56,431" "44,025" ...
## $ GNI.per.Capita.Rank.Minus.HDI.Rank : int 5 17 6 11 9 11 16 3 11 23 ...
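Note that Gross.National.Income..GNI..per.Capita was read as character because of the thousands separators (e.g. "64,992"). Converting it to numeric is handled properly in a later exercise; roughly, a sketch (GNI_numeric is a hypothetical helper column):
# strip the commas, then coerce to numeric
hd$GNI_numeric <- as.numeric(gsub(",", "", hd$Gross.National.Income..GNI..per.Capita))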
# dimensions of the data (rows x columns)
dim(hd)
## [1] 195 8
#summary of file
summary(hd)
## HDI.Rank Country Human.Development.Index..HDI.
## Min. : 1.00 Length:195 Min. :0.3480
## 1st Qu.: 47.75 Class :character 1st Qu.:0.5770
## Median : 94.00 Mode :character Median :0.7210
## Mean : 94.31 Mean :0.6918
## 3rd Qu.:141.25 3rd Qu.:0.8000
## Max. :188.00 Max. :0.9440
## NA's :7
## Life.Expectancy.at.Birth Expected.Years.of.Education Mean.Years.of.Education
## Min. :49.00 Min. : 4.10 Min. : 1.400
## 1st Qu.:65.75 1st Qu.:11.10 1st Qu.: 5.550
## Median :73.10 Median :13.10 Median : 8.400
## Mean :71.07 Mean :12.86 Mean : 8.079
## 3rd Qu.:76.80 3rd Qu.:14.90 3rd Qu.:10.600
## Max. :84.00 Max. :20.20 Max. :13.100
##
## Gross.National.Income..GNI..per.Capita GNI.per.Capita.Rank.Minus.HDI.Rank
## Length:195 Min. :-84.0000
## Class :character 1st Qu.: -9.0000
## Mode :character Median : 1.5000
## Mean : 0.1862
## 3rd Qu.: 11.0000
## Max. : 47.0000
## NA's :7
#column names
colnames(hd)
## [1] "HDI.Rank"
## [2] "Country"
## [3] "Human.Development.Index..HDI."
## [4] "Life.Expectancy.at.Birth"
## [5] "Expected.Years.of.Education"
## [6] "Mean.Years.of.Education"
## [7] "Gross.National.Income..GNI..per.Capita"
## [8] "GNI.per.Capita.Rank.Minus.HDI.Rank"
#see head and tail of the data
head(hd)
## HDI.Rank Country Human.Development.Index..HDI. Life.Expectancy.at.Birth
## 1 1 Norway 0.944 81.6
## 2 2 Australia 0.935 82.4
## 3 3 Switzerland 0.930 83.0
## 4 4 Denmark 0.923 80.2
## 5 5 Netherlands 0.922 81.6
## 6 6 Germany 0.916 80.9
## Expected.Years.of.Education Mean.Years.of.Education
## 1 17.5 12.6
## 2 20.2 13.0
## 3 15.8 12.8
## 4 18.7 12.7
## 5 17.9 11.9
## 6 16.5 13.1
## Gross.National.Income..GNI..per.Capita GNI.per.Capita.Rank.Minus.HDI.Rank
## 1 64,992 5
## 2 42,261 17
## 3 56,431 6
## 4 44,025 11
## 5 45,435 9
## 6 43,919 11
tail(hd)
## HDI.Rank Country Human.Development.Index..HDI.
## 190 NA East Asia and the Pacific 0.710
## 191 NA Europe and Central Asia 0.748
## 192 NA Latin America and the Caribbean 0.748
## 193 NA South Asia 0.607
## 194 NA Sub-Saharan Africa 0.518
## 195 NA World 0.711
## Life.Expectancy.at.Birth Expected.Years.of.Education
## 190 74.0 12.7
## 191 72.3 13.6
## 192 75.0 14.0
## 193 68.4 11.2
## 194 58.5 9.6
## 195 71.5 12.2
## Mean.Years.of.Education Gross.National.Income..GNI..per.Capita
## 190 7.5 11,449
## 191 10.0 12,791
## 192 8.2 14,242
## 193 5.5 5,605
## 194 5.2 3,363
## 195 7.9 14,301
## GNI.per.Capita.Rank.Minus.HDI.Rank
## 190 NA
## 191 NA
## 192 NA
## 193 NA
## 194 NA
## 195 NA
Let's do the same for the gii dataset.
# structure of the data
str(gii)
## 'data.frame': 195 obs. of 10 variables:
## $ GII.Rank : int 1 2 3 4 5 6 6 8 9 9 ...
## $ Country : chr "Norway" "Australia" "Switzerland" "Denmark" ...
## $ Gender.Inequality.Index..GII. : num 0.067 0.11 0.028 0.048 0.062 0.041 0.113 0.28 0.129 0.157 ...
## $ Maternal.Mortality.Ratio : int 4 6 6 5 6 7 9 28 11 8 ...
## $ Adolescent.Birth.Rate : num 7.8 12.1 1.9 5.1 6.2 3.8 8.2 31 14.5 25.3 ...
## $ Percent.Representation.in.Parliament : num 39.6 30.5 28.5 38 36.9 36.9 19.9 19.4 28.2 31.4 ...
## $ Population.with.Secondary.Education..Female.: num 97.4 94.3 95 95.5 87.7 96.3 80.5 95.1 100 95 ...
## $ Population.with.Secondary.Education..Male. : num 96.7 94.6 96.6 96.6 90.5 97 78.6 94.8 100 95.3 ...
## $ Labour.Force.Participation.Rate..Female. : num 61.2 58.8 61.8 58.7 58.5 53.6 53.1 56.3 61.6 62 ...
## $ Labour.Force.Participation.Rate..Male. : num 68.7 71.8 74.9 66.4 70.6 66.4 68.1 68.9 71 73.8 ...
# dimensions of the data (rows x columns)
dim(gii)
## [1] 195 10
#summary of file
summary(gii)
## GII.Rank Country Gender.Inequality.Index..GII.
## Min. : 1.00 Length:195 Min. :0.0160
## 1st Qu.: 47.75 Class :character 1st Qu.:0.2030
## Median : 94.00 Mode :character Median :0.3935
## Mean : 94.31 Mean :0.3695
## 3rd Qu.:141.25 3rd Qu.:0.5272
## Max. :188.00 Max. :0.7440
## NA's :7 NA's :33
## Maternal.Mortality.Ratio Adolescent.Birth.Rate
## Min. : 1.0 Min. : 0.60
## 1st Qu.: 16.0 1st Qu.: 15.45
## Median : 69.0 Median : 40.95
## Mean : 163.2 Mean : 49.55
## 3rd Qu.: 230.0 3rd Qu.: 71.78
## Max. :1100.0 Max. :204.80
## NA's :10 NA's :5
## Percent.Representation.in.Parliament
## Min. : 0.00
## 1st Qu.:12.47
## Median :19.50
## Mean :20.60
## 3rd Qu.:27.02
## Max. :57.50
## NA's :3
## Population.with.Secondary.Education..Female.
## Min. : 0.9
## 1st Qu.: 27.8
## Median : 55.7
## Mean : 54.8
## 3rd Qu.: 81.8
## Max. :100.0
## NA's :26
## Population.with.Secondary.Education..Male.
## Min. : 3.20
## 1st Qu.: 38.30
## Median : 60.00
## Mean : 60.29
## 3rd Qu.: 85.80
## Max. :100.00
## NA's :26
## Labour.Force.Participation.Rate..Female.
## Min. :13.50
## 1st Qu.:44.50
## Median :53.30
## Mean :52.61
## 3rd Qu.:62.62
## Max. :88.10
## NA's :11
## Labour.Force.Participation.Rate..Male.
## Min. :44.20
## 1st Qu.:68.88
## Median :75.55
## Mean :74.74
## 3rd Qu.:80.15
## Max. :95.50
## NA's :11
#column names
colnames(gii)
## [1] "GII.Rank"
## [2] "Country"
## [3] "Gender.Inequality.Index..GII."
## [4] "Maternal.Mortality.Ratio"
## [5] "Adolescent.Birth.Rate"
## [6] "Percent.Representation.in.Parliament"
## [7] "Population.with.Secondary.Education..Female."
## [8] "Population.with.Secondary.Education..Male."
## [9] "Labour.Force.Participation.Rate..Female."
## [10] "Labour.Force.Participation.Rate..Male."
#see head and tail of the data
head(gii)
## GII.Rank Country Gender.Inequality.Index..GII. Maternal.Mortality.Ratio
## 1 1 Norway 0.067 4
## 2 2 Australia 0.110 6
## 3 3 Switzerland 0.028 6
## 4 4 Denmark 0.048 5
## 5 5 Netherlands 0.062 6
## 6 6 Germany 0.041 7
## Adolescent.Birth.Rate Percent.Representation.in.Parliament
## 1 7.8 39.6
## 2 12.1 30.5
## 3 1.9 28.5
## 4 5.1 38.0
## 5 6.2 36.9
## 6 3.8 36.9
## Population.with.Secondary.Education..Female.
## 1 97.4
## 2 94.3
## 3 95.0
## 4 95.5
## 5 87.7
## 6 96.3
## Population.with.Secondary.Education..Male.
## 1 96.7
## 2 94.6
## 3 96.6
## 4 96.6
## 5 90.5
## 6 97.0
## Labour.Force.Participation.Rate..Female.
## 1 61.2
## 2 58.8
## 3 61.8
## 4 58.7
## 5 58.5
## 6 53.6
## Labour.Force.Participation.Rate..Male.
## 1 68.7
## 2 71.8
## 3 74.9
## 4 66.4
## 5 70.6
## 6 66.4
tail(gii)
## GII.Rank Country Gender.Inequality.Index..GII.
## 190 NA East Asia and the Pacific 0.328
## 191 NA Europe and Central Asia 0.300
## 192 NA Latin America and the Caribbean 0.415
## 193 NA South Asia 0.536
## 194 NA Sub-Saharan Africa 0.575
## 195 NA World 0.449
## Maternal.Mortality.Ratio Adolescent.Birth.Rate
## 190 72 21.2
## 191 28 30.8
## 192 85 68.3
## 193 183 38.7
## 194 506 109.7
## 195 210 47.4
## Percent.Representation.in.Parliament
## 190 18.7
## 191 19.0
## 192 27.0
## 193 17.5
## 194 22.5
## 195 21.8
## Population.with.Secondary.Education..Female.
## 190 54.7
## 191 70.8
## 192 54.3
## 193 29.1
## 194 22.1
## 195 54.5
## Population.with.Secondary.Education..Male.
## 190 66.3
## 191 80.6
## 192 55.2
## 193 54.6
## 194 31.5
## 195 65.4
## Labour.Force.Participation.Rate..Female.
## 190 62.6
## 191 45.6
## 192 53.7
## 193 29.8
## 194 65.4
## 195 50.3
## Labour.Force.Participation.Rate..Male.
## 190 79.4
## 191 70.0
## 192 79.8
## 193 80.3
## 194 76.6
## 195 76.7
Task 4: Rename the variables with (shorter) descriptive names
colnames(hd)
## [1] "HDI.Rank"
## [2] "Country"
## [3] "Human.Development.Index..HDI."
## [4] "Life.Expectancy.at.Birth"
## [5] "Expected.Years.of.Education"
## [6] "Mean.Years.of.Education"
## [7] "Gross.National.Income..GNI..per.Capita"
## [8] "GNI.per.Capita.Rank.Minus.HDI.Rank"
colnames(hd) <- c('HDI_Rank', 'Country', 'HDI', 'Life_Expec_Birth',
'Expec_yr_Edu', 'Mean_yr_Edu', 'GNI_per_Cap',
'GNI_per_Cap_Min_HDI_Rank')
colnames(hd)
## [1] "HDI_Rank" "Country"
## [3] "HDI" "Life_Expec_Birth"
## [5] "Expec_yr_Edu" "Mean_yr_Edu"
## [7] "GNI_per_Cap" "GNI_per_Cap_Min_HDI_Rank"
So you can see that the column names were shortened.
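Equivalently, dplyr::rename() renames columns one at a time without retyping the whole vector; shown commented out since the columns are already renamed above:
# rename(data, new_name = old_name), e.g.:
# hd <- dplyr::rename(hd, HDI = Human.Development.Index..HDI.)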
Let's do the same for the gii dataset.
colnames(gii)
## [1] "GII.Rank"
## [2] "Country"
## [3] "Gender.Inequality.Index..GII."
## [4] "Maternal.Mortality.Ratio"
## [5] "Adolescent.Birth.Rate"
## [6] "Percent.Representation.in.Parliament"
## [7] "Population.with.Secondary.Education..Female."
## [8] "Population.with.Secondary.Education..Male."
## [9] "Labour.Force.Participation.Rate..Female."
## [10] "Labour.Force.Participation.Rate..Male."
colnames(gii) <- c('GII_Rank', 'Country', 'GII', 'Mortality_R',
'Adolescent_Birth_R', 'Representation_Parliament',
'Sec_Edu_Fem','Sec_Edu_Mal',
"Lab_Forc_Particip_R_Fem",
"Lab_Forc_Particip_R_Mal" )
colnames(gii)
## [1] "GII_Rank" "Country"
## [3] "GII" "Mortality_R"
## [5] "Adolescent_Birth_R" "Representation_Parliament"
## [7] "Sec_Edu_Fem" "Sec_Edu_Mal"
## [9] "Lab_Forc_Particip_R_Fem" "Lab_Forc_Particip_R_Mal"
So you can see that the column names were shortened.
Task 5: Mutate the “Gender inequality” data and create two new variables
library("dplyr")
gii <- mutate(gii, edu_R = (Sec_Edu_Fem/Sec_Edu_Mal))
head(gii)
## GII_Rank Country GII Mortality_R Adolescent_Birth_R
## 1 1 Norway 0.067 4 7.8
## 2 2 Australia 0.110 6 12.1
## 3 3 Switzerland 0.028 6 1.9
## 4 4 Denmark 0.048 5 5.1
## 5 5 Netherlands 0.062 6 6.2
## 6 6 Germany 0.041 7 3.8
## Representation_Parliament Sec_Edu_Fem Sec_Edu_Mal Lab_Forc_Particip_R_Fem
## 1 39.6 97.4 96.7 61.2
## 2 30.5 94.3 94.6 58.8
## 3 28.5 95.0 96.6 61.8
## 4 38.0 95.5 96.6 58.7
## 5 36.9 87.7 90.5 58.5
## 6 36.9 96.3 97.0 53.6
## Lab_Forc_Particip_R_Mal edu_R
## 1 68.7 1.0072389
## 2 71.8 0.9968288
## 3 74.9 0.9834369
## 4 66.4 0.9886128
## 5 70.6 0.9690608
## 6 66.4 0.9927835
gii <- mutate(gii, Lab_Forc_R = (Lab_Forc_Particip_R_Fem/Lab_Forc_Particip_R_Mal))
head(gii)
## GII_Rank Country GII Mortality_R Adolescent_Birth_R
## 1 1 Norway 0.067 4 7.8
## 2 2 Australia 0.110 6 12.1
## 3 3 Switzerland 0.028 6 1.9
## 4 4 Denmark 0.048 5 5.1
## 5 5 Netherlands 0.062 6 6.2
## 6 6 Germany 0.041 7 3.8
## Representation_Parliament Sec_Edu_Fem Sec_Edu_Mal Lab_Forc_Particip_R_Fem
## 1 39.6 97.4 96.7 61.2
## 2 30.5 94.3 94.6 58.8
## 3 28.5 95.0 96.6 61.8
## 4 38.0 95.5 96.6 58.7
## 5 36.9 87.7 90.5 58.5
## 6 36.9 96.3 97.0 53.6
## Lab_Forc_Particip_R_Mal edu_R Lab_Forc_R
## 1 68.7 1.0072389 0.8908297
## 2 71.8 0.9968288 0.8189415
## 3 74.9 0.9834369 0.8251001
## 4 66.4 0.9886128 0.8840361
## 5 70.6 0.9690608 0.8286119
## 6 66.4 0.9927835 0.8072289
Instead of using mutate(), plain base R assignment also works:
gii$edu_R <- gii$Sec_Edu_Fem/gii$Sec_Edu_Mal
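Both ratios can also be created in a single mutate() call; an equivalent sketch:
gii <- mutate(gii,
              edu_R = Sec_Edu_Fem / Sec_Edu_Mal,
              Lab_Forc_R = Lab_Forc_Particip_R_Fem / Lab_Forc_Particip_R_Mal)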
Task 6: Join together the two datasets using the variable Country as the identifier.
TIP: we use an inner join to keep only the observations present in both data sets.
hd_gii_joint <- inner_join(hd, gii,
by = 'Country', suffix = c("_hd", "_gi"))
head(hd_gii_joint)
## HDI_Rank Country HDI Life_Expec_Birth Expec_yr_Edu Mean_yr_Edu
## 1 1 Norway 0.944 81.6 17.5 12.6
## 2 2 Australia 0.935 82.4 20.2 13.0
## 3 3 Switzerland 0.930 83.0 15.8 12.8
## 4 4 Denmark 0.923 80.2 18.7 12.7
## 5 5 Netherlands 0.922 81.6 17.9 11.9
## 6 6 Germany 0.916 80.9 16.5 13.1
## GNI_per_Cap GNI_per_Cap_Min_HDI_Rank GII_Rank GII Mortality_R
## 1 64,992 5 1 0.067 4
## 2 42,261 17 2 0.110 6
## 3 56,431 6 3 0.028 6
## 4 44,025 11 4 0.048 5
## 5 45,435 9 5 0.062 6
## 6 43,919 11 6 0.041 7
## Adolescent_Birth_R Representation_Parliament Sec_Edu_Fem Sec_Edu_Mal
## 1 7.8 39.6 97.4 96.7
## 2 12.1 30.5 94.3 94.6
## 3 1.9 28.5 95.0 96.6
## 4 5.1 38.0 95.5 96.6
## 5 6.2 36.9 87.7 90.5
## 6 3.8 36.9 96.3 97.0
## Lab_Forc_Particip_R_Fem Lab_Forc_Particip_R_Mal edu_R Lab_Forc_R
## 1 61.2 68.7 1.0072389 0.8908297
## 2 58.8 71.8 0.9968288 0.8189415
## 3 61.8 74.9 0.9834369 0.8251001
## 4 58.7 66.4 0.9886128 0.8840361
## 5 58.5 70.6 0.9690608 0.8286119
## 6 53.6 66.4 0.9927835 0.8072289
dim(hd_gii_joint)
## [1] 195 19
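The dimensions make sense: every Country occurs in both files, so the inner join keeps all 195 rows, and the shared key column is counted only once:
# 8 columns from hd + 12 from gii (10 original + 2 new ratios) - 1 shared Country key
ncol(hd) + ncol(gii) - 1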
setwd("./data")
write.csv(hd_gii_joint, "human.csv")
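One caveat: by default write.csv() also writes a column of row names, which reappears as an unnamed first column when the file is read back. If that is unwanted, row.names = FALSE avoids it; a sketch:
write.csv(hd_gii_joint, "human.csv", row.names = FALSE)
# reading it back later: human <- read.csv("human.csv")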
(more chapters to be added similarly as we proceed with the course!)